diff --git a/.gitattributes b/.gitattributes index 4999f2dc912ff7fdb6e979c8f0fe30763b58a17a..e22461f0813e36ecbdefda1e2da61216ba3b668d 100644 --- a/.gitattributes +++ b/.gitattributes @@ -53,3 +53,5 @@ pythonProject/.venv/Lib/site-packages/functorch/_C.cp310-win_amd64.pyd filter=lf pythonProject/.venv/Lib/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text pythonProject/.venv/Lib/site-packages/numpy/fft/_pocketfft_umath.cp310-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text pythonProject/.venv/Lib/site-packages/numpy/random/_common.cp310-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text +pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_regression.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +pythonProject/.venv/Lib/site-packages/numpy.libs/msvcp140-263139962577ecda4cd9469ca360a746.dll filter=lfs diff=lfs merge=lfs -text diff --git a/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/DELVEWHEEL b/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/DELVEWHEEL new file mode 100644 index 0000000000000000000000000000000000000000..a7123d80cd0c058b99ad6adb5596e01b6a164cb4 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/DELVEWHEEL @@ -0,0 +1,2 @@ +Version: 1.10.1 +Arguments: ['C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-7xf_vzet\\cp310-win_amd64\\build\\venv\\Scripts\\delvewheel', 'repair', '--add-path', 'C:/a/numpy/numpy/.openblas/lib', '-w', 'C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-7xf_vzet\\cp310-win_amd64\\repaired_wheel', 'C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-7xf_vzet\\cp310-win_amd64\\built_wheel\\numpy-2.2.6-cp310-cp310-win_amd64.whl'] diff --git a/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/INSTALLER b/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/INSTALLER new file mode 100644 index 
0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/LICENSE.txt b/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..80edcc24fb577360fc0bcd8d815aee91bc5cad84 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/LICENSE.txt @@ -0,0 +1,950 @@ +Copyright (c) 2005-2024, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---- + +The NumPy repository and source distributions bundle several libraries that are +compatibly licensed. We list these here. + +Name: lapack-lite +Files: numpy/linalg/lapack_lite/* +License: BSD-3-Clause + For details, see numpy/linalg/lapack_lite/LICENSE.txt + +Name: dragon4 +Files: numpy/_core/src/multiarray/dragon4.c +License: MIT + For license text, see numpy/_core/src/multiarray/dragon4.c + +Name: libdivide +Files: numpy/_core/include/numpy/libdivide/* +License: Zlib + For license text, see numpy/_core/include/numpy/libdivide/LICENSE.txt + + +Note that the following files are vendored in the repository and sdist but not +installed in built numpy packages: + +Name: Meson +Files: vendored-meson/meson/* +License: Apache 2.0 + For license text, see vendored-meson/meson/COPYING + +Name: spin +Files: .spin/cmds.py +License: BSD-3 + For license text, see .spin/LICENSE + +Name: tempita +Files: numpy/_build_utils/tempita/* +License: MIT + For details, see numpy/_build_utils/tempita/LICENCE.txt + +---- + +This binary distribution of NumPy also bundles the following software: + + +Name: OpenBLAS +Files: numpy.libs\libscipy_openblas*.dll +Description: bundled as a dynamically linked library +Availability: https://github.com/OpenMathLib/OpenBLAS/ +License: BSD-3-Clause + Copyright (c) 2011-2014, The OpenBLAS Project + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Name: LAPACK +Files: numpy.libs\libscipy_openblas*.dll +Description: bundled in OpenBLAS +Availability: https://github.com/OpenMathLib/OpenBLAS/ +License: BSD-3-Clause-Attribution + Copyright (c) 1992-2013 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. + Copyright (c) 2000-2013 The University of California Berkeley. All + rights reserved. + Copyright (c) 2006-2013 The University of Colorado Denver. All rights + reserved. 
+ + $COPYRIGHT$ + + Additional copyrights may follow + + $HEADER$ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + + - Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + The copyright holders provide no reassurances that the source code + provided does not infringe any patent, copyright, or any other + intellectual property rights of third parties. The copyright holders + disclaim any liability to any recipient for claims brought against + recipient by any third party for infringement of that parties + intellectual property rights. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +Name: GCC runtime library +Files: numpy.libs\libscipy_openblas*.dll +Description: statically linked to files compiled with gcc +Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran +License: GPL-3.0-with-GCC-exception + Copyright (C) 2002-2017 Free Software Foundation, Inc. + + Libgfortran is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgfortran is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . + +---- + +Full text of license texts referred to above follows (that they are +listed below does not necessarily imply the conditions apply to the +present binary release): + +---- + +GCC RUNTIME LIBRARY EXCEPTION + +Version 3.1, 31 March 2009 + +Copyright (C) 2009 Free Software Foundation, Inc. + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +This GCC Runtime Library Exception ("Exception") is an additional +permission under section 7 of the GNU General Public License, version +3 ("GPLv3"). It applies to a given file (the "Runtime Library") that +bears a notice placed by the copyright holder of the file stating that +the file is governed by GPLv3 along with this Exception. 
+ +When you use GCC to compile a program, GCC may combine portions of +certain GCC header files and runtime libraries with the compiled +program. The purpose of this Exception is to allow compilation of +non-GPL (including proprietary) programs to use, in this way, the +header files and runtime libraries covered by this Exception. + +0. Definitions. + +A file is an "Independent Module" if it either requires the Runtime +Library for execution after a Compilation Process, or makes use of an +interface provided by the Runtime Library, but is not otherwise based +on the Runtime Library. + +"GCC" means a version of the GNU Compiler Collection, with or without +modifications, governed by version 3 (or a specified later version) of +the GNU General Public License (GPL) with the option of using any +subsequent versions published by the FSF. + +"GPL-compatible Software" is software whose conditions of propagation, +modification and use would permit combination with GCC in accord with +the license of GCC. + +"Target Code" refers to output from any compiler for a real or virtual +target processor architecture, in executable form or suitable for +input to an assembler, loader, linker and/or execution +phase. Notwithstanding that, Target Code does not include data in any +format that is used as a compiler intermediate representation, or used +for producing a compiler intermediate representation. + +The "Compilation Process" transforms code entirely represented in +non-intermediate languages designed for human-written code, and/or in +Java Virtual Machine byte code, into Target Code. Thus, for example, +use of source code generators and preprocessors need not be considered +part of the Compilation Process, since the Compilation Process can be +understood as starting with the output of the generators or +preprocessors. 
+ +A Compilation Process is "Eligible" if it is done using GCC, alone or +with other GPL-compatible software, or if it is done without using any +work based on GCC. For example, using non-GPL-compatible Software to +optimize any GCC intermediate representations would not qualify as an +Eligible Compilation Process. + +1. Grant of Additional Permission. + +You have permission to propagate a work of Target Code formed by +combining the Runtime Library with Independent Modules, even if such +propagation would otherwise violate the terms of GPLv3, provided that +all Target Code was generated by Eligible Compilation Processes. You +may then convey such a combination under terms of your choice, +consistent with the licensing of the Independent Modules. + +2. No Weakening of GCC Copyleft. + +The availability of this Exception does not imply any general +presumption that third-party software is unaffected by the copyleft +requirements of the license of GCC. + +---- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
+ diff --git a/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/METADATA b/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..2f6433721a44963688483505a51650e33a71c4ac --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/METADATA @@ -0,0 +1,1071 @@ +Metadata-Version: 2.1 +Name: numpy +Version: 2.2.6 +Summary: Fundamental package for array computing in Python +Author: Travis E. Oliphant et al. +Maintainer-Email: NumPy Developers +License: Copyright (c) 2005-2024, NumPy Developers. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ---- + + The NumPy repository and source distributions bundle several libraries that are + compatibly licensed. We list these here. + + Name: lapack-lite + Files: numpy/linalg/lapack_lite/* + License: BSD-3-Clause + For details, see numpy/linalg/lapack_lite/LICENSE.txt + + Name: dragon4 + Files: numpy/_core/src/multiarray/dragon4.c + License: MIT + For license text, see numpy/_core/src/multiarray/dragon4.c + + Name: libdivide + Files: numpy/_core/include/numpy/libdivide/* + License: Zlib + For license text, see numpy/_core/include/numpy/libdivide/LICENSE.txt + + + Note that the following files are vendored in the repository and sdist but not + installed in built numpy packages: + + Name: Meson + Files: vendored-meson/meson/* + License: Apache 2.0 + For license text, see vendored-meson/meson/COPYING + + Name: spin + Files: .spin/cmds.py + License: BSD-3 + For license text, see .spin/LICENSE + + Name: tempita + Files: numpy/_build_utils/tempita/* + License: MIT + For details, see numpy/_build_utils/tempita/LICENCE.txt + + ---- + + This binary distribution of NumPy also bundles the following software: + + + Name: OpenBLAS + Files: numpy.libs\libscipy_openblas*.dll + Description: bundled as a dynamically linked library + Availability: https://github.com/OpenMathLib/OpenBLAS/ + License: BSD-3-Clause + Copyright (c) 2011-2014, The OpenBLAS Project + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + Name: LAPACK + Files: numpy.libs\libscipy_openblas*.dll + Description: bundled in OpenBLAS + Availability: https://github.com/OpenMathLib/OpenBLAS/ + License: BSD-3-Clause-Attribution + Copyright (c) 1992-2013 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. + Copyright (c) 2000-2013 The University of California Berkeley. All + rights reserved. + Copyright (c) 2006-2013 The University of Colorado Denver. All rights + reserved. 
+ + $COPYRIGHT$ + + Additional copyrights may follow + + $HEADER$ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + + - Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + The copyright holders provide no reassurances that the source code + provided does not infringe any patent, copyright, or any other + intellectual property rights of third parties. The copyright holders + disclaim any liability to any recipient for claims brought against + recipient by any third party for infringement of that parties + intellectual property rights. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + + Name: GCC runtime library + Files: numpy.libs\libscipy_openblas*.dll + Description: statically linked to files compiled with gcc + Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran + License: GPL-3.0-with-GCC-exception + Copyright (C) 2002-2017 Free Software Foundation, Inc. + + Libgfortran is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgfortran is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . + + ---- + + Full text of license texts referred to above follows (that they are + listed below does not necessarily imply the conditions apply to the + present binary release): + + ---- + + GCC RUNTIME LIBRARY EXCEPTION + + Version 3.1, 31 March 2009 + + Copyright (C) 2009 Free Software Foundation, Inc. + + Everyone is permitted to copy and distribute verbatim copies of this + license document, but changing it is not allowed. + + This GCC Runtime Library Exception ("Exception") is an additional + permission under section 7 of the GNU General Public License, version + 3 ("GPLv3"). It applies to a given file (the "Runtime Library") that + bears a notice placed by the copyright holder of the file stating that + the file is governed by GPLv3 along with this Exception. 
+ + When you use GCC to compile a program, GCC may combine portions of + certain GCC header files and runtime libraries with the compiled + program. The purpose of this Exception is to allow compilation of + non-GPL (including proprietary) programs to use, in this way, the + header files and runtime libraries covered by this Exception. + + 0. Definitions. + + A file is an "Independent Module" if it either requires the Runtime + Library for execution after a Compilation Process, or makes use of an + interface provided by the Runtime Library, but is not otherwise based + on the Runtime Library. + + "GCC" means a version of the GNU Compiler Collection, with or without + modifications, governed by version 3 (or a specified later version) of + the GNU General Public License (GPL) with the option of using any + subsequent versions published by the FSF. + + "GPL-compatible Software" is software whose conditions of propagation, + modification and use would permit combination with GCC in accord with + the license of GCC. + + "Target Code" refers to output from any compiler for a real or virtual + target processor architecture, in executable form or suitable for + input to an assembler, loader, linker and/or execution + phase. Notwithstanding that, Target Code does not include data in any + format that is used as a compiler intermediate representation, or used + for producing a compiler intermediate representation. + + The "Compilation Process" transforms code entirely represented in + non-intermediate languages designed for human-written code, and/or in + Java Virtual Machine byte code, into Target Code. Thus, for example, + use of source code generators and preprocessors need not be considered + part of the Compilation Process, since the Compilation Process can be + understood as starting with the output of the generators or + preprocessors. 
+ + A Compilation Process is "Eligible" if it is done using GCC, alone or + with other GPL-compatible software, or if it is done without using any + work based on GCC. For example, using non-GPL-compatible Software to + optimize any GCC intermediate representations would not qualify as an + Eligible Compilation Process. + + 1. Grant of Additional Permission. + + You have permission to propagate a work of Target Code formed by + combining the Runtime Library with Independent Modules, even if such + propagation would otherwise violate the terms of GPLv3, provided that + all Target Code was generated by Eligible Compilation Processes. You + may then convey such a combination under terms of your choice, + consistent with the licensing of the Independent Modules. + + 2. No Weakening of GCC Copyleft. + + The availability of this Exception does not imply any general + presumption that third-party software is unaffected by the copyleft + requirements of the license of GCC. + + ---- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for + software and other kinds of works. + + The licenses for most software and other practical works are designed + to take away your freedom to share and change the works. By contrast, + the GNU General Public License is intended to guarantee your freedom to + share and change all versions of a program--to make sure it remains free + software for all its users. We, the Free Software Foundation, use the + GNU General Public License for most of our software; it applies also to + any other work released this way by its authors. You can apply it to + your programs, too. + + When we speak of free software, we are referring to freedom, not + price. 
Our General Public Licenses are designed to make sure that you + have the freedom to distribute copies of free software (and charge for + them if you wish), that you receive source code or can get it if you + want it, that you can change the software or use pieces of it in new + free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you + these rights or asking you to surrender the rights. Therefore, you have + certain responsibilities if you distribute copies of the software, or if + you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether + gratis or for a fee, you must pass on to the recipients the same + freedoms that you received. You must make sure that they, too, receive + or can get the source code. And you must show them these terms so they + know their rights. + + Developers that use the GNU GPL protect your rights with two steps: + (1) assert copyright on the software, and (2) offer you this License + giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains + that there is no warranty for this free software. For both users' and + authors' sake, the GPL requires that modified versions be marked as + changed, so that their problems will not be attributed erroneously to + authors of previous versions. + + Some devices are designed to deny users access to install or run + modified versions of the software inside them, although the manufacturer + can do so. This is fundamentally incompatible with the aim of + protecting users' freedom to change the software. The systematic + pattern of such abuse occurs in the area of products for individuals to + use, which is precisely where it is most unacceptable. Therefore, we + have designed this version of the GPL to prohibit the practice for those + products. 
If such problems arise substantially in other domains, we + stand ready to extend this provision to those domains in future versions + of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. + States should not allow patents to restrict development and use of + software on general-purpose computers, but in those that do, we wish to + avoid the special danger that patents applied to a free program could + make it effectively proprietary. To prevent this, the GPL assures that + patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and + modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of + works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this + License. Each licensee is addressed as "you". "Licensees" and + "recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work + in a fashion requiring copyright permission, other than the making of an + exact copy. The resulting work is called a "modified version" of the + earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based + on the Program. + + To "propagate" a work means to do anything with it that, without + permission, would make you directly or secondarily liable for + infringement under applicable copyright law, except executing it on a + computer or modifying a private copy. Propagation includes copying, + distribution (with or without modification), making available to the + public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other + parties to make or receive copies. 
Mere interaction with a user through + a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" + to the extent that it includes a convenient and prominently visible + feature that (1) displays an appropriate copyright notice, and (2) + tells the user that there is no warranty for the work (except to the + extent that warranties are provided), that licensees may convey the + work under this License, and how to view a copy of this License. If + the interface presents a list of user commands or options, such as a + menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work + for making modifications to it. "Object code" means any non-source + form of a work. + + A "Standard Interface" means an interface that either is an official + standard defined by a recognized standards body, or, in the case of + interfaces specified for a particular programming language, one that + is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other + than the work as a whole, that (a) is included in the normal form of + packaging a Major Component, but which is not part of that Major + Component, and (b) serves only to enable use of the work with that + Major Component, or to implement a Standard Interface for which an + implementation is available to the public in source code form. A + "Major Component", in this context, means a major essential component + (kernel, window system, and so on) of the specific operating system + (if any) on which the executable work runs, or a compiler used to + produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all + the source code needed to generate, install, and (for an executable + work) run the object code and to modify the work, including scripts to + control those activities. However, it does not include the work's + System Libraries, or general-purpose tools or generally available free + programs which are used unmodified in performing those activities but + which are not part of the work. For example, Corresponding Source + includes interface definition files associated with source files for + the work, and the source code for shared libraries and dynamically + linked subprograms that the work is specifically designed to require, + such as by intimate data communication or control flow between those + subprograms and other parts of the work. + + The Corresponding Source need not include anything that users + can regenerate automatically from other parts of the Corresponding + Source. + + The Corresponding Source for a work in source code form is that + same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of + copyright on the Program, and are irrevocable provided the stated + conditions are met. This License explicitly affirms your unlimited + permission to run the unmodified Program. The output from running a + covered work is covered by this License only if the output, given its + content, constitutes a covered work. This License acknowledges your + rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not + convey, without conditions so long as your license otherwise remains + in force. 
You may convey covered works to others for the sole purpose + of having them make modifications exclusively for you, or provide you + with facilities for running those works, provided that you comply with + the terms of this License in conveying all material for which you do + not control copyright. Those thus making or running the covered works + for you must do so exclusively on your behalf, under your direction + and control, on terms that prohibit them from making any copies of + your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under + the conditions stated below. Sublicensing is not allowed; section 10 + makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological + measure under any applicable law fulfilling obligations under article + 11 of the WIPO copyright treaty adopted on 20 December 1996, or + similar laws prohibiting or restricting circumvention of such + measures. + + When you convey a covered work, you waive any legal power to forbid + circumvention of technological measures to the extent such circumvention + is effected by exercising rights under this License with respect to + the covered work, and you disclaim any intention to limit operation or + modification of the work as a means of enforcing, against the work's + users, your or third parties' legal rights to forbid circumvention of + technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you + receive it, in any medium, provided that you conspicuously and + appropriately publish on each copy an appropriate copyright notice; + keep intact all notices stating that this License and any + non-permissive terms added in accord with section 7 apply to the code; + keep intact all notices of the absence of any warranty; and give all + recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, + and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to + produce it from the Program, in the form of source code under the + terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent + works, which are not by their nature extensions of the covered work, + and which are not combined with it such as to form a larger program, + in or on a volume of a storage or distribution medium, is called an + "aggregate" if the compilation and its resulting copyright are not + used to limit the access or legal rights of the compilation's users + beyond what the individual works permit. Inclusion of a covered work + in an aggregate does not cause this License to apply to the other + parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms + of sections 4 and 5, provided that you also convey the + machine-readable Corresponding Source under the terms of this License, + in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded + from the Corresponding Source as a System Library, need not be + included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any + tangible personal property which is normally used for personal, family, + or household purposes, or (2) anything designed or sold for incorporation + into a dwelling. In determining whether a product is a consumer product, + doubtful cases shall be resolved in favor of coverage. 
For a particular + product received by a particular user, "normally used" refers to a + typical or common use of that class of product, regardless of the status + of the particular user or of the way in which the particular user + actually uses, or expects or is expected to use, the product. A product + is a consumer product regardless of whether the product has substantial + commercial, industrial or non-consumer uses, unless such uses represent + the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, + procedures, authorization keys, or other information required to install + and execute modified versions of a covered work in that User Product from + a modified version of its Corresponding Source. The information must + suffice to ensure that the continued functioning of the modified object + code is in no case prevented or interfered with solely because + modification has been made. + + If you convey an object code work under this section in, or with, or + specifically for use in, a User Product, and the conveying occurs as + part of a transaction in which the right of possession and use of the + User Product is transferred to the recipient in perpetuity or for a + fixed term (regardless of how the transaction is characterized), the + Corresponding Source conveyed under this section must be accompanied + by the Installation Information. But this requirement does not apply + if neither you nor any third party retains the ability to install + modified object code on the User Product (for example, the work has + been installed in ROM). + + The requirement to provide Installation Information does not include a + requirement to continue to provide support service, warranty, or updates + for a work that has been modified or installed by the recipient, or for + the User Product in which it has been modified or installed. 
Access to a + network may be denied when the modification itself materially and + adversely affects the operation of the network or violates the rules and + protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, + in accord with this section must be in a format that is publicly + documented (and with an implementation available to the public in + source code form), and must require no special password or key for + unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this + License by making exceptions from one or more of its conditions. + Additional permissions that are applicable to the entire Program shall + be treated as though they were included in this License, to the extent + that they are valid under applicable law. If additional permissions + apply only to part of the Program, that part may be used separately + under those permissions, but the entire Program remains governed by + this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option + remove any additional permissions from that copy, or from any part of + it. (Additional permissions may be written to require their own + removal in certain cases when you modify the work.) You may place + additional permissions on material, added by you to a covered work, + for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you + add to a covered work, you may (if authorized by the copyright holders of + that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further + restrictions" within the meaning of section 10. If the Program as you + received it, or any part of it, contains a notice stating that it is + governed by this License along with a term that is a further + restriction, you may remove that term. If a license document contains + a further restriction but permits relicensing or conveying under this + License, you may add to a covered work material governed by the terms + of that license document, provided that the further restriction does + not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you + must place, in the relevant source files, a statement of the + additional terms that apply to those files, or a notice indicating + where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the + form of a separately written license, or stated as exceptions; + the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly + provided under this License. Any attempt otherwise to propagate or + modify it is void, and will automatically terminate your rights under + this License (including any patent licenses granted under the third + paragraph of section 11). + + However, if you cease all violation of this License, then your + license from a particular copyright holder is reinstated (a) + provisionally, unless and until the copyright holder explicitly and + finally terminates your license, and (b) permanently, if the copyright + holder fails to notify you of the violation by some reasonable means + prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is + reinstated permanently if the copyright holder notifies you of the + violation by some reasonable means, this is the first time you have + received notice of violation of this License (for any work) from that + copyright holder, and you cure the violation prior to 30 days after + your receipt of the notice. + + Termination of your rights under this section does not terminate the + licenses of parties who have received copies or rights from you under + this License. If your rights have been terminated and not permanently + reinstated, you do not qualify to receive new licenses for the same + material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or + run a copy of the Program. 
Ancillary propagation of a covered work + occurring solely as a consequence of using peer-to-peer transmission + to receive a copy likewise does not require acceptance. However, + nothing other than this License grants you permission to propagate or + modify any covered work. These actions infringe copyright if you do + not accept this License. Therefore, by modifying or propagating a + covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically + receives a license from the original licensors, to run, modify and + propagate that work, subject to this License. You are not responsible + for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an + organization, or substantially all assets of one, or subdividing an + organization, or merging organizations. If propagation of a covered + work results from an entity transaction, each party to that + transaction who receives a copy of the work also receives whatever + licenses to the work the party's predecessor in interest had or could + give under the previous paragraph, plus a right to possession of the + Corresponding Source of the work from the predecessor in interest, if + the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the + rights granted or affirmed under this License. For example, you may + not impose a license fee, royalty, or other charge for exercise of + rights granted under this License, and you may not initiate litigation + (including a cross-claim or counterclaim in a lawsuit) alleging that + any patent claim is infringed by making, using, selling, offering for + sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this + License of the Program or a work on which the Program is based. The + work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims + owned or controlled by the contributor, whether already acquired or + hereafter acquired, that would be infringed by some manner, permitted + by this License, of making, using, or selling its contributor version, + but do not include claims that would be infringed only as a + consequence of further modification of the contributor version. For + purposes of this definition, "control" includes the right to grant + patent sublicenses in a manner consistent with the requirements of + this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free + patent license under the contributor's essential patent claims, to + make, use, sell, offer for sale, import and otherwise run, modify and + propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express + agreement or commitment, however denominated, not to enforce a patent + (such as an express permission to practice a patent or covenant not to + sue for patent infringement). To "grant" such a patent license to a + party means to make such an agreement or commitment not to enforce a + patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, + and the Corresponding Source of the work is not available for anyone + to copy, free of charge and under the terms of this License, through a + publicly available network server or other readily accessible means, + then you must either (1) cause the Corresponding Source to be so + available, or (2) arrange to deprive yourself of the benefit of the + patent license for this particular work, or (3) arrange, in a manner + consistent with the requirements of this License, to extend the patent + license to downstream recipients. "Knowingly relying" means you have + actual knowledge that, but for the patent license, your conveying the + covered work in a country, or your recipient's use of the covered work + in a country, would infringe one or more identifiable patents in that + country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or + arrangement, you convey, or propagate by procuring conveyance of, a + covered work, and grant a patent license to some of the parties + receiving the covered work authorizing them to use, propagate, modify + or convey a specific copy of the covered work, then the patent license + you grant is automatically extended to all recipients of the covered + work and works based on it. + + A patent license is "discriminatory" if it does not include within + the scope of its coverage, prohibits the exercise of, or is + conditioned on the non-exercise of one or more of the rights that are + specifically granted under this License. 
You may not convey a covered + work if you are a party to an arrangement with a third party that is + in the business of distributing software, under which you make payment + to the third party based on the extent of your activity of conveying + the work, and under which the third party grants, to any of the + parties who would receive the covered work from you, a discriminatory + patent license (a) in connection with copies of the covered work + conveyed by you (or copies made from those copies), or (b) primarily + for and in connection with specific products or compilations that + contain the covered work, unless you entered into that arrangement, + or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting + any implied license or other defenses to infringement that may + otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or + otherwise) that contradict the conditions of this License, they do not + excuse you from the conditions of this License. If you cannot convey a + covered work so as to satisfy simultaneously your obligations under this + License and any other pertinent obligations, then as a consequence you may + not convey it at all. For example, if you agree to terms that obligate you + to collect a royalty for further conveying from those to whom you convey + the Program, the only way you could satisfy both those terms and this + License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have + permission to link or combine any covered work with a work licensed + under version 3 of the GNU Affero General Public License into a single + combined work, and to convey the resulting work. 
The terms of this + License will continue to apply to the part which is the covered work, + but the special requirements of the GNU Affero General Public License, + section 13, concerning interaction through a network will apply to the + combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of + the GNU General Public License from time to time. Such new versions will + be similar in spirit to the present version, but may differ in detail to + address new problems or concerns. + + Each version is given a distinguishing version number. If the + Program specifies that a certain numbered version of the GNU General + Public License "or any later version" applies to it, you have the + option of following the terms and conditions either of that numbered + version or of any later version published by the Free Software + Foundation. If the Program does not specify a version number of the + GNU General Public License, you may choose any version ever published + by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future + versions of the GNU General Public License can be used, that proxy's + public statement of acceptance of a version permanently authorizes you + to choose that version for the Program. + + Later license versions may give you additional or different + permissions. However, no additional obligations are imposed on any + author or copyright holder as a result of your choosing to follow a + later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY + APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT + HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY + OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, + THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM + IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF + ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING + WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS + THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY + GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE + USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF + DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD + PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), + EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF + SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided + above cannot be given local legal effect according to their terms, + reviewing courts shall apply local law that most closely approximates + an absolute waiver of all civil liability in connection with the + Program, unless a warranty or assumption of liability accompanies a + copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest + possible use to the public, the best way to achieve this is to make it + free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest + to attach them to the start of each source file to most effectively + state the exclusion of warranty; and each file should have at least + the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + + Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short + notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + + The hypothetical commands `show w' and `show c' should show the appropriate + parts of the General Public License. Of course, your program's commands + might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, + if any, to sign a "copyright disclaimer" for the program, if necessary. + For more information on this, and how to apply and follow the GNU GPL, see + . + + The GNU General Public License does not permit incorporating your program + into proprietary programs. If your program is a subroutine library, you + may consider it more useful to permit linking proprietary applications with + the library. If this is what you want to do, use the GNU Lesser General + Public License instead of this License. But first, please read + . 
+ + +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: C +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Topic :: Software Development +Classifier: Topic :: Scientific/Engineering +Classifier: Typing :: Typed +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Operating System :: Unix +Classifier: Operating System :: MacOS +Project-URL: homepage, https://numpy.org +Project-URL: documentation, https://numpy.org/doc/ +Project-URL: source, https://github.com/numpy/numpy +Project-URL: download, https://pypi.org/project/numpy/#files +Project-URL: tracker, https://github.com/numpy/numpy/issues +Project-URL: release notes, https://numpy.org/doc/stable/release +Requires-Python: >=3.10 +Description-Content-Type: text/markdown + +

+ +


+ + +[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)]( +https://numfocus.org) +[![PyPI Downloads](https://img.shields.io/pypi/dm/numpy.svg?label=PyPI%20downloads)]( +https://pypi.org/project/numpy/) +[![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/numpy.svg?label=Conda%20downloads)]( +https://anaconda.org/conda-forge/numpy) +[![Stack Overflow](https://img.shields.io/badge/stackoverflow-Ask%20questions-blue.svg)]( +https://stackoverflow.com/questions/tagged/numpy) +[![Nature Paper](https://img.shields.io/badge/DOI-10.1038%2Fs41586--020--2649--2-blue)]( +https://doi.org/10.1038/s41586-020-2649-2) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/numpy/numpy/badge)](https://securityscorecards.dev/viewer/?uri=github.com/numpy/numpy) + + +NumPy is the fundamental package for scientific computing with Python. + +- **Website:** https://www.numpy.org +- **Documentation:** https://numpy.org/doc +- **Mailing list:** https://mail.python.org/mailman/listinfo/numpy-discussion +- **Source code:** https://github.com/numpy/numpy +- **Contributing:** https://www.numpy.org/devdocs/dev/index.html +- **Bug reports:** https://github.com/numpy/numpy/issues +- **Report a security vulnerability:** https://tidelift.com/docs/security + +It provides: + +- a powerful N-dimensional array object +- sophisticated (broadcasting) functions +- tools for integrating C/C++ and Fortran code +- useful linear algebra, Fourier transform, and random number capabilities + +Testing: + +NumPy requires `pytest` and `hypothesis`. Tests can then be run after installation with: + + python -c "import numpy, sys; sys.exit(numpy.test() is False)" + +Code of Conduct +---------------------- + +NumPy is a community-driven open source project developed by a diverse group of +[contributors](https://numpy.org/teams/). 
The NumPy leadership has made a strong +commitment to creating an open, inclusive, and positive community. Please read the +[NumPy Code of Conduct](https://numpy.org/code-of-conduct/) for guidance on how to interact +with others in a way that makes our community thrive. + +Call for Contributions +---------------------- + +The NumPy project welcomes your expertise and enthusiasm! + +Small improvements or fixes are always appreciated. If you are considering larger contributions +to the source code, please contact us through the [mailing +list](https://mail.python.org/mailman/listinfo/numpy-discussion) first. + +Writing code isn’t the only way to contribute to NumPy. You can also: +- review pull requests +- help us stay on top of new and old issues +- develop tutorials, presentations, and other educational materials +- maintain and improve [our website](https://github.com/numpy/numpy.org) +- develop graphic design for our brand assets and promotional materials +- translate website content +- help with outreach and onboard new contributors +- write grant proposals and help with other fundraising efforts + +For more information about the ways you can contribute to NumPy, visit [our website](https://numpy.org/contribute/). +If you’re unsure where to start or how your skills fit in, reach out! You can +ask on the mailing list or here, on GitHub, by opening a new issue or leaving a +comment on a relevant issue that is already open. + +Our preferred channels of communication are all public, but if you’d like to +speak to us in private first, contact our community coordinators at +numpy-team@googlegroups.com or on Slack (write numpy-team@googlegroups.com for +an invitation). + +We also have a biweekly community call, details of which are announced on the +mailing list. You are very welcome to join. + +If you are new to contributing to open source, [this +guide](https://opensource.guide/how-to-contribute/) helps explain why, what, +and how to successfully get involved. 
diff --git a/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/RECORD b/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..a954a4951efa856a98cb5940653ec3c1fdaae09f --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/RECORD @@ -0,0 +1,1518 @@ +../../Scripts/f2py.exe,sha256=NajvPazW0Bm9nhYX1U55RzBwUES8Ghk00WrkaE1Xo4A,108406 +../../Scripts/numpy-config.exe,sha256=xsDBgGKyZbv2PaqXjtaf9cu2PNqNyq53h7Vj1h0M0p4,108406 +numpy-2.2.6-cp310-cp310-win_amd64.whl,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy-2.2.6.dist-info/DELVEWHEEL,sha256=aqEcjAAXewVMaI3h7J_WiImSkhX8Rj8Y-2Sc1zwnwe8,446 +numpy-2.2.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +numpy-2.2.6.dist-info/LICENSE.txt,sha256=FCVsw6LJ0yrChNqWuTf-tE9y3ZC-4jF6wwIBZoRq2Z0,47709 +numpy-2.2.6.dist-info/METADATA,sha256=Ip81RLAoBeD2oSAw4VXYpF_TpBALMpFXQXXmp28g4eE,60844 +numpy-2.2.6.dist-info/RECORD,, +numpy-2.2.6.dist-info/WHEEL,sha256=1nIT8bOU3dBEtO1OHNUw1PB7s17JH9tAQ93SLqU9JNM,85 +numpy-2.2.6.dist-info/entry_points.txt,sha256=4mXDNhJDQ9GHqMBeRJ8B3PlixTFmkXGqU3RVuac20q0,172 +numpy.libs/libscipy_openblas64_-13e2df515630b4a41f92893938845698.dll,sha256=ZUfp-5Zul3PK7idV6RqL9NbzovDuv5ZGsBWPhnXqSrU,20390912 +numpy.libs/msvcp140-263139962577ecda4cd9469ca360a746.dll,sha256=pMIim9wqKmMKzcCVtNhgCOXD47x3cxdDVPPaT1vrnN4,575056 +numpy/__config__.py,sha256=lPZgoJ9ThwbUYdG8tbz-ln4E0ggYHCdEFwidCfREb2o,5693 +numpy/__config__.pyi,sha256=b1FAG-TOWL9zaaazZoHWMp2OogmcZSTn3mkUJ5MRh3A,2479 +numpy/__init__.cython-30.pxd,sha256=bwqZBDOkt5Nce2EmeHd9mV1eGoLTSoctn2n-lCWKWxc,48041 +numpy/__init__.pxd,sha256=HUXemn5QF2YGUqOvJmqmFYpWZ64RPv5GwS66LG1eu5g,44591 +numpy/__init__.py,sha256=rSOOdujG-9VqGebIlIZM9Ga9LtdgBMrInnjAGfpiVgc,23016 +numpy/__init__.pyi,sha256=2TpZYYk9frp0cueFUUWqAw-HroYd1k4Y5fN_-M00_0U,217089 +numpy/__pycache__/__config__.cpython-310.pyc,, 
+numpy/__pycache__/__init__.cpython-310.pyc,, +numpy/__pycache__/_array_api_info.cpython-310.pyc,, +numpy/__pycache__/_configtool.cpython-310.pyc,, +numpy/__pycache__/_distributor_init.cpython-310.pyc,, +numpy/__pycache__/_expired_attrs_2_0.cpython-310.pyc,, +numpy/__pycache__/_globals.cpython-310.pyc,, +numpy/__pycache__/_pytesttester.cpython-310.pyc,, +numpy/__pycache__/conftest.cpython-310.pyc,, +numpy/__pycache__/ctypeslib.cpython-310.pyc,, +numpy/__pycache__/dtypes.cpython-310.pyc,, +numpy/__pycache__/exceptions.cpython-310.pyc,, +numpy/__pycache__/matlib.cpython-310.pyc,, +numpy/__pycache__/version.cpython-310.pyc,, +numpy/_array_api_info.py,sha256=Qd_2x_pUQLdBtnPKodEZy2Zds-R5i2DKQacMmMVRaRk,10727 +numpy/_array_api_info.pyi,sha256=Y7SGdw5yxh4JQGeavwCaN2fpR7DR0KzU8GoOn7SKoiw,5102 +numpy/_configtool.py,sha256=CgdDWSv9AX6XNKIibBXBisvuCu0aUkVVKbNudJfERIw,1046 +numpy/_configtool.pyi,sha256=IlC395h8TlcZ4DiSW5i6NBQO9I74ERfXpwSYAktzoaU,25 +numpy/_core/__init__.py,sha256=ziVwv-eSrrG6jAQYH3eQcPtNsdRZaWBnvzKCj4MrtbA,5792 +numpy/_core/__init__.pyi,sha256=C5NQDIktXlR1OosGgyvY87pyotkyJr3Ci2dMWTLpSi4,88 +numpy/_core/__pycache__/__init__.cpython-310.pyc,, +numpy/_core/__pycache__/_add_newdocs.cpython-310.pyc,, +numpy/_core/__pycache__/_add_newdocs_scalars.cpython-310.pyc,, +numpy/_core/__pycache__/_asarray.cpython-310.pyc,, +numpy/_core/__pycache__/_dtype.cpython-310.pyc,, +numpy/_core/__pycache__/_dtype_ctypes.cpython-310.pyc,, +numpy/_core/__pycache__/_exceptions.cpython-310.pyc,, +numpy/_core/__pycache__/_internal.cpython-310.pyc,, +numpy/_core/__pycache__/_machar.cpython-310.pyc,, +numpy/_core/__pycache__/_methods.cpython-310.pyc,, +numpy/_core/__pycache__/_string_helpers.cpython-310.pyc,, +numpy/_core/__pycache__/_type_aliases.cpython-310.pyc,, +numpy/_core/__pycache__/_ufunc_config.cpython-310.pyc,, +numpy/_core/__pycache__/arrayprint.cpython-310.pyc,, +numpy/_core/__pycache__/cversions.cpython-310.pyc,, +numpy/_core/__pycache__/defchararray.cpython-310.pyc,, 
+numpy/_core/__pycache__/einsumfunc.cpython-310.pyc,, +numpy/_core/__pycache__/fromnumeric.cpython-310.pyc,, +numpy/_core/__pycache__/function_base.cpython-310.pyc,, +numpy/_core/__pycache__/getlimits.cpython-310.pyc,, +numpy/_core/__pycache__/memmap.cpython-310.pyc,, +numpy/_core/__pycache__/multiarray.cpython-310.pyc,, +numpy/_core/__pycache__/numeric.cpython-310.pyc,, +numpy/_core/__pycache__/numerictypes.cpython-310.pyc,, +numpy/_core/__pycache__/overrides.cpython-310.pyc,, +numpy/_core/__pycache__/printoptions.cpython-310.pyc,, +numpy/_core/__pycache__/records.cpython-310.pyc,, +numpy/_core/__pycache__/shape_base.cpython-310.pyc,, +numpy/_core/__pycache__/strings.cpython-310.pyc,, +numpy/_core/__pycache__/umath.cpython-310.pyc,, +numpy/_core/_add_newdocs.py,sha256=eQ_QDKS8SuavunWLZh9rz0QtRVtzDtrIhmc3OAgodXw,215729 +numpy/_core/_add_newdocs.pyi,sha256=ttPc9PlJ6lBkZrBrjzzWD4_jxmkIxpojL8RWR-d3e1c,171 +numpy/_core/_add_newdocs_scalars.py,sha256=TeVoRpAbqG46cLwGVf-PRK-cIt9qgAKstAI-nHs8abg,12992 +numpy/_core/_add_newdocs_scalars.pyi,sha256=qgD9RUeJdv6bkYewvQPXXCzO_roSKbaueq9PyvS6wSA,589 +numpy/_core/_asarray.py,sha256=3wUlbaCM-agtm5HVRzD6T2xiqNpafdZ77QVkgb-HCAw,4047 +numpy/_core/_asarray.pyi,sha256=vuCMO_o0RNeK0av8O5fvo93YOdxjJ2kgFeaw3GDobpY,1126 +numpy/_core/_dtype.py,sha256=itXloCOgln5qr5mMFvGA54AEpC0ueUA3qiEH6Z798O0,11108 +numpy/_core/_dtype.pyi,sha256=fVZoHORimwm-ck_pKiUx1RJvtSZoF7d5QxdGZI3ebVI,2009 +numpy/_core/_dtype_ctypes.py,sha256=ebN9U_QbymSP-ombYBYc4F7HtgC3ViucNW91MqpNhrM,3838 +numpy/_core/_dtype_ctypes.pyi,sha256=d5BudSdtj6n046OX9c-rUoX5zVGghdoO22yEhkjVRoM,3765 +numpy/_core/_exceptions.py,sha256=35d-to48ERMggcjK60hKzHYhZJUUAxWY1GcJWh9bPJE,5551 +numpy/_core/_exceptions.pyi,sha256=g4N5rEZf25Fbpu3AKAJn9c5MTlj671zZ6zWqPTd1Dnk,2219 +numpy/_core/_internal.py,sha256=f7PNtQIywHQYg7rGnL7Wgo27Wwswcwl1i5tlRKnjgmw,30127 +numpy/_core/_internal.pyi,sha256=sKos4TSABLgaoK1w_l06DewqULMQQIcNfQl6OPYBKPk,2726 
+numpy/_core/_machar.py,sha256=TWlW2yOKVA7Vk-s9gusxRgumvgTdCcAPL_72k8SROd4,11921 +numpy/_core/_machar.pyi,sha256=g4N5rEZf25Fbpu3AKAJn9c5MTlj671zZ6zWqPTd1Dnk,2219 +numpy/_core/_methods.py,sha256=QQaL40BLBbWChnmVaD9zYZtBqfxyufDMeYP1X1MTEUY,9725 +numpy/_core/_methods.pyi,sha256=J9wblAExV__OQipgX4HbG74DOK5p4Ec1I31yNwv5WWg,580 +numpy/_core/_multiarray_tests.cp310-win_amd64.lib,sha256=k6ABYAfoJVE2IP8ZVxEYyvD8Drzw2Pg_JN48g42Vpko,2418 +numpy/_core/_multiarray_tests.cp310-win_amd64.pyd,sha256=mMCXuJLScamDo_14grZnivFZbj7sg6lbi4WNUYct_Qc,62976 +numpy/_core/_multiarray_umath.cp310-win_amd64.lib,sha256=OncmcAYnGZagNKpWXVyIw-4uhOiD-Owhc-R07N0q3NE,2192 +numpy/_core/_multiarray_umath.cp310-win_amd64.pyd,sha256=SD9aVg0qEhhdyc8jLHwjTdaDWb1jIiguUBAHlkzRyrY,4170752 +numpy/_core/_operand_flag_tests.cp310-win_amd64.lib,sha256=Y_7lPD2AeeHEwWIFemRMhF-AReBe7ZMjqDUzA45QbgU,2228 +numpy/_core/_operand_flag_tests.cp310-win_amd64.pyd,sha256=FMDimBMUKKqrpmDPRv0XqkN1mzvrM-fAd4VoNmddZ5U,11776 +numpy/_core/_rational_tests.cp310-win_amd64.lib,sha256=N7B5ksu9MIfVTzceX-Zv2EtxlhpTFacIByWtClHNbEU,2156 +numpy/_core/_rational_tests.cp310-win_amd64.pyd,sha256=QEnOp3ysiLFk8IA7wRbo7zAQl0uLsztRwJC87Mphgys,40448 +numpy/_core/_simd.cp310-win_amd64.lib,sha256=ClKZrlpbGBXAIxQ1UmwozeamzqimjOZPdyEbbiIVEXQ,1976 +numpy/_core/_simd.cp310-win_amd64.pyd,sha256=CWWZENpUOUEiSac_UGBDNv0BrtTVKXWbMfKrRqG6RMo,2238464 +numpy/_core/_simd.pyi,sha256=RN-uZiTi3jZNOgOOKlu97Mu1Ufkb8jvLUDDEnaW77Lc,694 +numpy/_core/_string_helpers.py,sha256=yqhYXnS3SgnP_4PvP7NUYvYJ7c5GeFJz8a8zI_uU0DI,2937 +numpy/_core/_string_helpers.pyi,sha256=bThH7ichGlrmQ6O38n71QcJ7Oi_mRPUf-DFMU5hdYU0,370 +numpy/_core/_struct_ufunc_tests.cp310-win_amd64.lib,sha256=OibD1bJoaWH5VANAYIGqkQSOv5Q0l2INY-t9TO4kdbM,2228 +numpy/_core/_struct_ufunc_tests.cp310-win_amd64.pyd,sha256=QbaxmzDHfoppzxarK6B58o4MhFp3IOxwv2-9-mrrsJI,13824 +numpy/_core/_type_aliases.py,sha256=uUDC8quSr11Ld3MRI0-4Rm1fiX1GFvu_rn1PVADuoKE,3608 
+numpy/_core/_type_aliases.pyi,sha256=YU1X6HEVmLcxqwswoZ_vdckwXcNPuyNhRxZgHAubMdA,2496 +numpy/_core/_ufunc_config.py,sha256=82EbRK2pOLuW9YZRbepA0u8-_xzuYbDi2Y0rHv5Lq1k,15513 +numpy/_core/_ufunc_config.pyi,sha256=L0fOlCDoajz-jkPfFDa7xo28o_GJDCsm4GjgpSi3e_Y,1211 +numpy/_core/_umath_tests.cp310-win_amd64.lib,sha256=LIQEzeYe4g4nS9_a1BKZAfcNMRjwOWOFdGE4Tuw3T2A,2104 +numpy/_core/_umath_tests.cp310-win_amd64.pyd,sha256=zMLwmjhT76jO9RtPZOvJ3uPuzbjaooMktiT4nCfLSu0,33792 +numpy/_core/arrayprint.py,sha256=OY0l5ex3zDsf_sVW1XG2_LNX3779nOLWjcDY78e7n4Y,66568 +numpy/_core/arrayprint.pyi,sha256=DGL2dfAhzXXOzOCIe_dEErom1cKMVOW-TB_18WnQfho,7163 +numpy/_core/cversions.py,sha256=FISv1d4R917Bi5xJjKKy8Lo6AlFkV00WvSoB7l3acA4,360 +numpy/_core/defchararray.py,sha256=eOGMMdrk4pVbuGvv3EBjjcGBNusWrHK4tNVIpJO5Z7I,39212 +numpy/_core/defchararray.pyi,sha256=gIu3J0lelyWehaqPwUFwxgxlLtwSLMJWTVctGLS1yck,28058 +numpy/_core/einsumfunc.py,sha256=bsFwrvIcvknVoQuaTk91125O-HApybSIvEPahQD8VxE,54319 +numpy/_core/einsumfunc.pyi,sha256=fef7KF6KWLIhmBdbyDXt1J7g1C1QsMf0P05JKI7FGE4,5114 +numpy/_core/fromnumeric.py,sha256=0X8Dqwd61a4aovHetN0_zzIsac7pW7JOw7UK7UXrDVI,148176 +numpy/_core/fromnumeric.pyi,sha256=rwqQrnix67jCJ8I7YuIlVq_z-Kg_3UdBruLvIdTI4vI,42923 +numpy/_core/function_base.py,sha256=x3yPOA2m9lZMNC0qJrUNO65ddzgoISNe2mqzgJ0BIBM,20279 +numpy/_core/function_base.pyi,sha256=2wwLp3M55Vd6DCNCWzgk_DClf_0tOKyY5TbyNLE5YK0,5925 +numpy/_core/getlimits.py,sha256=d3G99QSPEYNn73Mzxab-v1RWwVzBp67V_TcXvJ6wbCs,26859 +numpy/_core/getlimits.pyi,sha256=3u55btDSVkpbsnFxkCWqRY7LZ1WhGop_LAUnjJfOUR8,64 +numpy/_core/include/numpy/__multiarray_api.c,sha256=Vc65MKuXE5761vVI9qdZkPyg3C5_k_ickum0Q04EOOA,13045 +numpy/_core/include/numpy/__multiarray_api.h,sha256=SzcxgIDQ8m4Ds1fvlM9fQ8RuINJpcPLRKzpb9HFDtpw,62996 +numpy/_core/include/numpy/__ufunc_api.c,sha256=NoTcyLqrAF8F3AE0TDvlDFS7DXuFJRpoINEaDnZWhys,1809 +numpy/_core/include/numpy/__ufunc_api.h,sha256=Q36B7NKN8E6GLytefgBOGLfgRnt8ayO1Conr2QWlqkA,13506 
+numpy/_core/include/numpy/_neighborhood_iterator_imp.h,sha256=s5TK2aPpClbw4CbVJCij__hzoh5IgHIIZK0k6FKtqfc,1947 +numpy/_core/include/numpy/_numpyconfig.h,sha256=mqDMFv5Vhk2nHXNf6TIWzz7ozrtc9aNaN8_LJZBYjX0,902 +numpy/_core/include/numpy/_public_dtype_api_table.h,sha256=4ylG8s52kZEx__QODt_7Do8QitmhDSvTeZ7Lar0fOgo,4660 +numpy/_core/include/numpy/arrayobject.h,sha256=ghWzloPUkSaVkcsAnBnpbrxtXeXL-mkzVGJQEHFxjnk,211 +numpy/_core/include/numpy/arrayscalars.h,sha256=4TrsilxaUiH4mVCkElEPTM_C_8c67O9R4Whx-3QzDE4,4439 +numpy/_core/include/numpy/dtype_api.h,sha256=cfQuPb0zrVqYFdWauOqbgdXR8rtm4DjNz2nbfSWvSRo,19718 +numpy/_core/include/numpy/halffloat.h,sha256=qYgX5iQfNzXICsnd0MCRq5ELhhfFjlRGm1xXGimQm44,2029 +numpy/_core/include/numpy/ndarrayobject.h,sha256=V5Zkf5a9vWyV8ZInBgAceBn7c9GK4aquhzeGTW_Sgls,12361 +numpy/_core/include/numpy/ndarraytypes.h,sha256=R3CFlGdGUQNj7rEovi8zRkJoJMk0y8682cu1kX1soAA,66986 +numpy/_core/include/numpy/npy_1_7_deprecated_api.h,sha256=eYbQlqb6mzJnUKuVfl2mmrMpvB3GN2rFgHazFO9CKT8,3858 +numpy/_core/include/numpy/npy_2_compat.h,sha256=VxsRXAtDfLlXkvH-ErZRSuH49k9EjcFwcSUSfTPRzAU,8795 +numpy/_core/include/numpy/npy_2_complexcompat.h,sha256=uW0iF-qMwQNn4PvIfWCrYce6b4OrYUO4BWu-VYYAZag,885 +numpy/_core/include/numpy/npy_3kcompat.h,sha256=dV01ltbxntPY8cN7WAL4MX3KHeyCLeSBDQreDxs09aQ,10022 +numpy/_core/include/numpy/npy_common.h,sha256=3njI4LhBxMZvkkdG3nLq0NZI7lNqx9dnvcTfCgEW0rI,37621 +numpy/_core/include/numpy/npy_cpu.h,sha256=FSFhzOQ_lvcpGw-4Qtzqu5W8eiD6k_K351_9WiI5uTg,4837 +numpy/_core/include/numpy/npy_endian.h,sha256=NZSi-5CbqZ92AUztILDJLBKP61-VQezmAatYTNLwRu8,2912 +numpy/_core/include/numpy/npy_math.h,sha256=ksdiKBXDfpEHB1s9m5yinyhjdcc0h-zJcfXEuoVHAd8,19460 +numpy/_core/include/numpy/npy_no_deprecated_api.h,sha256=jIcjEP2AbovDTfgE-qtvdP51_dVGjVnEGBX86rlGSKE,698 +numpy/_core/include/numpy/npy_os.h,sha256=j044vd1C1oCcW52r3htiVNhUaJSEqCjKrODwMHq3TU0,1298 +numpy/_core/include/numpy/numpyconfig.h,sha256=zDucznj2xbSOImOJLhjYHosMFqkoWY6AZvdRDpUrcw8,7339 
+numpy/_core/include/numpy/random/LICENSE.txt,sha256=1UR2FVi1EIZsIffootVxb8p24LmBF-O2uGMU23JE0VA,1039 +numpy/_core/include/numpy/random/bitgen.h,sha256=_H0uXqmnub4PxnJWdMWaNqfpyFDu2KB0skf2wc5vjUc,508 +numpy/_core/include/numpy/random/distributions.h,sha256=GLURa3sFESZE0_0RK-3Gqmfa96itBHw8LlsNyy9EPt4,10070 +numpy/_core/include/numpy/random/libdivide.h,sha256=F9PLx6TcOk-sd0dObe0nWLyz4HhbHv2K7voR_kolpGU,82217 +numpy/_core/include/numpy/ufuncobject.h,sha256=PO10lEoSvptYe57rrGpMiF0tytnmq9PW2UeMIwN06MY,12265 +numpy/_core/include/numpy/utils.h,sha256=vzJAbatJYfxHmX2yL_xBirmB4mEGLOhJ92JlV9s8yPs,1222 +numpy/_core/lib/npy-pkg-config/mlib.ini,sha256=hYWFyoBxE036dh19si8UPka01H2cv64qlc4ZtgoA_7A,156 +numpy/_core/lib/npy-pkg-config/npymath.ini,sha256=e0rdsb00Y93VuammuvIIFlzZtnUAXwsS1XNKlCU8mFQ,381 +numpy/_core/lib/npymath.lib,sha256=PWDh18WNxuX3csSIf0ZnOL3oTGclmJj21Voz_R72KYw,154174 +numpy/_core/lib/pkgconfig/numpy.pc,sha256=mpZeixCAkzCIFdSNDBWUmRM2fgJVcN3o9vtNN01yeDk,198 +numpy/_core/memmap.py,sha256=gtXDgEHkmzuCjwPpYFT5gkFzziYC41JAGYuKNDo2CfI,13025 +numpy/_core/memmap.pyi,sha256=n0kBe4iQD5lcWvAvVhdUU18YIoPX6Sf5e2qh9IdO5uQ,50 +numpy/_core/multiarray.py,sha256=-cH3HHzztYiSSESwdN7zcgqXk1L_AREuimMRJBk_cTE,59891 +numpy/_core/multiarray.pyi,sha256=YEdOhn2NyCGHLQRgpe1Gd45xBHFlJSddPsHiesSWmAM,34751 +numpy/_core/numeric.py,sha256=81YhZQ2LpEvj5Unv5EUlbXWktmkGR9dN2CvOWEjYukE,84439 +numpy/_core/numeric.pyi,sha256=JVDzjcZBz_VOzHxL9cYux2LPrvq9Fg7ZI6fOiLciiAA,20094 +numpy/_core/numerictypes.py,sha256=o2lhl1SFpGuDeIGFzfgS2zLWDwIAt41gi3VphVd9Cwg,16754 +numpy/_core/numerictypes.pyi,sha256=Nuq8mEo62a0A1-6HJNTrONN7b9hkUNSp-HTRP28rYkc,3750 +numpy/_core/overrides.py,sha256=gHrmLDRjVyi3poY5qYVrsAP9QNvvhXtiW1ggx5gwgGs,7392 +numpy/_core/overrides.pyi,sha256=eSG0Xzxm7wutARSaQ_mmHPRDX-xpFHM0TAVheebF3S8,1793 +numpy/_core/printoptions.py,sha256=btxyfisjJ_7DB5JtZKAtaLYL9qmcmTnnJ8pHFcwn2Wc,1095 +numpy/_core/printoptions.pyi,sha256=QE36MVL3BgqflyQuj6UOzywbnELMiLeyNz_1sALvOSU,622 
+numpy/_core/records.py,sha256=tQof8zOMoY8UXn4ZtoIU8O6lPj8pUsDQFCOkZRAat9A,37953 +numpy/_core/records.pyi,sha256=9df4GE_hkYVI-IlqCI6cxJHtYvuQ8TUwsXSjM_U8fOI,9104 +numpy/_core/shape_base.py,sha256=ZROh3EbNF1IuHLF_1ecJ0NXpwUf__RXw8M2bWC1IO0Q,33887 +numpy/_core/shape_base.pyi,sha256=vIF5OAbTOYBsImf_9BdIlT115WNburRg2KAFUV9ZMIs,4720 +numpy/_core/strings.py,sha256=ZAKXDmpIzKReWYaUl-F4h5Fht4VVJ-z0VQpRV-Wf0kc,47313 +numpy/_core/strings.pyi,sha256=XfkkJ03nRnTkqZop0LKUE_K-jVhrA4RFh9z-LHGQMng,13270 +numpy/_core/tests/__pycache__/_locales.cpython-310.pyc,, +numpy/_core/tests/__pycache__/_natype.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test__exceptions.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_abc.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_api.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_argparse.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_array_api_info.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_array_coercion.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_array_interface.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_arraymethod.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_arrayobject.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_arrayprint.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_casting_floatingpoint_errors.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_casting_unittests.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_conversion_utils.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_cpu_dispatcher.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_cpu_features.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_custom_dtypes.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_cython.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_datetime.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_defchararray.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_deprecations.cpython-310.pyc,, 
+numpy/_core/tests/__pycache__/test_dlpack.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_dtype.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_einsum.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_errstate.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_extint128.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_function_base.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_getlimits.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_half.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_hashtable.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_indexerrors.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_indexing.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_item_selection.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_limited_api.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_longdouble.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_machar.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_mem_overlap.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_mem_policy.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_memmap.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_multiarray.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_multithreading.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_nditer.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_nep50_promotions.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_numeric.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_numerictypes.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_overrides.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_print.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_protocols.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_records.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_regression.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_scalar_ctors.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_scalar_methods.cpython-310.pyc,, 
+numpy/_core/tests/__pycache__/test_scalarbuffer.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_scalarinherit.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_scalarmath.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_scalarprint.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_shape_base.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_simd.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_simd_module.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_stringdtype.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_strings.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_ufunc.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_umath.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_umath_accuracy.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_umath_complex.cpython-310.pyc,, +numpy/_core/tests/__pycache__/test_unicode.cpython-310.pyc,, +numpy/_core/tests/_locales.py,sha256=xsKJqT3ZZiJGLQbm4Xx1W2i9KLqx14oQE9wUa49PkJ8,2248 +numpy/_core/tests/_natype.py,sha256=uVXHCahmyDbZZAaQ-OKqaWnOgJRIYRETU06drssSSP0,6457 +numpy/_core/tests/data/astype_copy.pkl,sha256=lWSzCcvzRB_wpuRGj92spGIw-rNPFcd9hwJaRVvfWdk,716 +numpy/_core/tests/data/generate_umath_validation_data.cpp,sha256=9TBdxpPo0djv1CKxQ6_DbGKRxIZVawitAm7AMmWKroI,6012 +numpy/_core/tests/data/recarray_from_file.fits,sha256=NA0kliz31FlLnYxv3ppzeruONqNYkuEvts5wzXEeIc4,8640 +numpy/_core/tests/data/umath-validation-set-README.txt,sha256=GfrkmU_wTjpLkOftWDuGayEDdV3RPpN2GRVQX61VgWI,982 +numpy/_core/tests/data/umath-validation-set-arccos.csv,sha256=VUdQdKBFrpXHLlPtX2WYIK_uwkaXgky85CZ4aNuvmD4,62794 +numpy/_core/tests/data/umath-validation-set-arccosh.csv,sha256=tbuOQkvnYxSyJf_alGk3Zw3Vyv0HO5dMC1hUle2hWwQ,62794 +numpy/_core/tests/data/umath-validation-set-arcsin.csv,sha256=JPEWWMxgPKdNprDq0pH5QhJ2oiVCzuDbK-3WhTKny8o,62768 +numpy/_core/tests/data/umath-validation-set-arcsinh.csv,sha256=fwuq25xeS57kBExBuSNfewgHb-mgoR9wUGVqcOXbfoI,61718 
+numpy/_core/tests/data/umath-validation-set-arctan.csv,sha256=nu33YyL-ALXSSF5cupCTaf_jTPLK_QyUfciNQGpffkY,61734 +numpy/_core/tests/data/umath-validation-set-arctanh.csv,sha256=wHSKFY2Yvbv3fnmmfLqPYpjhkEM88YHkFVpZQioyBDw,62768 +numpy/_core/tests/data/umath-validation-set-cbrt.csv,sha256=FFi_XxEnGrfJd7OxtjVFT6WFC2tUqKhVV8fmQfb0z8o,62275 +numpy/_core/tests/data/umath-validation-set-cos.csv,sha256=ccDri5_jQ84D_kAmSwZ_ztNUPIhzhgycDtNsPB7m8dc,60497 +numpy/_core/tests/data/umath-validation-set-cosh.csv,sha256=DnN6RGvKQHAWIofchmhGH7kkJej2VtNwGGMRZGzBkTQ,62298 +numpy/_core/tests/data/umath-validation-set-exp.csv,sha256=mPhjF4KLe0bdwx38SJiNipD24ntLI_5aWc8h-V0UMgM,17903 +numpy/_core/tests/data/umath-validation-set-exp2.csv,sha256=sD94pK2EAZAyD2fDEocfw1oXNw1qTlW1TBwRlcpbcsI,60053 +numpy/_core/tests/data/umath-validation-set-expm1.csv,sha256=tyfZN5D8tlm7APgxCIPyuy774AZHytMOB59H9KewxEs,61728 +numpy/_core/tests/data/umath-validation-set-log.csv,sha256=CDPky64PjaURWhqkHxkLElmMiI21v5ugGGyzhdfUbnI,11963 +numpy/_core/tests/data/umath-validation-set-log10.csv,sha256=dW6FPEBlRx2pcS-7eui_GtqTpXzOy147il55qdP-8Ak,70551 +numpy/_core/tests/data/umath-validation-set-log1p.csv,sha256=2aEsHVcvRym-4535CkvJTsmHywkt01ZMfmjl-d4fvVI,61732 +numpy/_core/tests/data/umath-validation-set-log2.csv,sha256=aVZ7VMQ5urGOx5MMMOUmMKBhFLFE-U7y6DVCTeXQfo0,70546 +numpy/_core/tests/data/umath-validation-set-sin.csv,sha256=GvPrQUEYMX1iB2zjbfK26JUJOxtqbfiRUgXuAO1QcP0,59981 +numpy/_core/tests/data/umath-validation-set-sinh.csv,sha256=lc7OYcYWWpkxbMuRAWmogQ5cKi7EwsQ2ibiMdpJWYbw,61722 +numpy/_core/tests/data/umath-validation-set-tan.csv,sha256=fn7Dr9s6rcqGUzsmyJxve_Z18J4AUaSm-uo2N3N_hfk,61728 +numpy/_core/tests/data/umath-validation-set-tanh.csv,sha256=xSY5fgfeBXN6fal4XDed-VUcgFIy9qKOosa7vQ5v1-U,61728 +numpy/_core/tests/examples/cython/__pycache__/setup.cpython-310.pyc,, +numpy/_core/tests/examples/cython/checks.pyx,sha256=sGva3PIcoahXBIF3QkBWPjVnEWHPGQM1ktNp8OYEwUo,8183 
+numpy/_core/tests/examples/cython/meson.build,sha256=EaUdTgpleUBROExDaFVMnWIYW4XDxFLFGK9ej_pTtQg,1311 +numpy/_core/tests/examples/cython/setup.py,sha256=tPQ9m6dr48JSvLpgmV-aVnMWMV0islzlSrynB5yGYDY,894 +numpy/_core/tests/examples/limited_api/__pycache__/setup.cpython-310.pyc,, +numpy/_core/tests/examples/limited_api/limited_api1.c,sha256=RcHe_nyyjv86gjF9E53cexQiGW-YNs8OGGqjrxCFhBc,363 +numpy/_core/tests/examples/limited_api/limited_api2.pyx,sha256=4P5-yu0yr8NBa-TFtw4v30LGjccRroRAQFFLaztEK9I,214 +numpy/_core/tests/examples/limited_api/limited_api_latest.c,sha256=drvrNSyOeF0Or0trDmayJWllTP7c4Nzpp9T0ydwPAGo,471 +numpy/_core/tests/examples/limited_api/meson.build,sha256=yitMzLuGDhWCjyavpm5UEBrhwKnfXOVAxA3ZL7PlB0Q,1686 +numpy/_core/tests/examples/limited_api/setup.py,sha256=N7kqsVp4iIE20IebigEJUW3nW2F0l6Vthb5qNvKHBmM,457 +numpy/_core/tests/test__exceptions.py,sha256=gy7-mZq7XS5z_w-us4gRIzC0H7XqC_62xaQQmWqLzSw,2970 +numpy/_core/tests/test_abc.py,sha256=u82wrSKXJ2V7AmNrh4klHxYiqOx0BYWJ4j7hqTMH--A,2275 +numpy/_core/tests/test_api.py,sha256=bURvc6MoIHlij2SrsgiDyafAQ-AyQDFFmbIzr-yo8Es,23546 +numpy/_core/tests/test_argparse.py,sha256=vPctuxToPkZMlbgjnzE924XkxXYUdBxlR6LsP2_-aQM,2914 +numpy/_core/tests/test_array_api_info.py,sha256=7n9-LJv-wgAMVbfK1JG7dQAU2WBYQbO7yeN4rP38Ltg,3174 +numpy/_core/tests/test_array_coercion.py,sha256=vG5HHfLgl1HcP6oemFxvpYqibS0eWqRAKCxLCiZBjaY,35744 +numpy/_core/tests/test_array_interface.py,sha256=E6QR-DJYTJX_F-i70PakQMmvxzfSBD-W1rFve70MFTg,7986 +numpy/_core/tests/test_arraymethod.py,sha256=b7DeRtgzSCTzoPiS1BT1Wwvpr31g_YP44Dd4V6uaR-U,3339 +numpy/_core/tests/test_arrayobject.py,sha256=cQu4aDjyF6EgoiGe5UISyOHGx5QEkdGvbfCXVuKjHQ8,2671 +numpy/_core/tests/test_arrayprint.py,sha256=RERzgbVQ3mumSGJZhYV9LesTkcsMH8TrHMBPmPwkBTg,50349 +numpy/_core/tests/test_casting_floatingpoint_errors.py,sha256=FRRWJBppa5v1axij6L14ENmzoZS8R_SyJKgHiAFI2KQ,5228 +numpy/_core/tests/test_casting_unittests.py,sha256=FCokQoS_56dOoBjq1WSp2UVE2NE9WS2w2u-4xNBQjMM,35126 
+numpy/_core/tests/test_conversion_utils.py,sha256=cz2WEiCYSEP9m_7RHa2pS8WW0PcWO0E-LvpLTO72PkE,6814 +numpy/_core/tests/test_cpu_dispatcher.py,sha256=Bpb_ep7kT3OfNypV1pSOWCNlk8oT46kjZBEGS32qfCI,1597 +numpy/_core/tests/test_cpu_features.py,sha256=GO_Uf6FAK2pX8kiI9R1Uv7oEFzuPVzp3hu3bm6cZCuU,15838 +numpy/_core/tests/test_custom_dtypes.py,sha256=JEeRO7ykZSPYDTlKEV4EWjlMWJwEWieiCCsN4aEd0WA,11934 +numpy/_core/tests/test_cython.py,sha256=bgJxvIlQlMxxFA-Hqlgqo7NIvEKPZ7FvnY0av-EBfoM,8923 +numpy/_core/tests/test_datetime.py,sha256=3KyUxJqrgCRvUomM3Pxw_wCodRjWYRcp-92Bggiqk2U,124376 +numpy/_core/tests/test_defchararray.py,sha256=iOO8AUBOwhadTI8UUlOw-tI0Dd0l4k1rLY9gWFuQLbw,31423 +numpy/_core/tests/test_deprecations.py,sha256=y76kFkCfXImnwdjBA6WJSSKPtFl_d6K3O-hCRT6_LAQ,29255 +numpy/_core/tests/test_dlpack.py,sha256=MKc9PdCk1Tboz8RpMbrZzLFwkTEEEj-wQ5kZ1NeHtNI,5990 +numpy/_core/tests/test_dtype.py,sha256=aQLvZkJzZX5qt9uGfXilf-o8xlIQ8p-ZDBUczLQnED8,79365 +numpy/_core/tests/test_einsum.py,sha256=xOIA5Co2_FxHkzr0b-acAW5f_FOtnGDhQvS4gB1mLdM,54119 +numpy/_core/tests/test_errstate.py,sha256=MjV1p7tDq7LpAgIT730lMNVFsxN0RVK4CtRJgBkpVlk,4763 +numpy/_core/tests/test_extint128.py,sha256=YKIX0q9ENW0qehJtdaAAB2sFG0me42U2yJmq0kK6xGQ,5863 +numpy/_core/tests/test_function_base.py,sha256=1FGoTuZLK_r0567gNARFIXqLhIY6QA9mqkh6rMGMLNw,17950 +numpy/_core/tests/test_getlimits.py,sha256=k9_TaYqBCL-OvYpyvWAoTxpCwZSpxYFvvr2R7vuPEeg,7180 +numpy/_core/tests/test_half.py,sha256=7M6VWJnBU7pnpGuoZc1hiltB5-rn9PkDEXI-EmtNKSA,24880 +numpy/_core/tests/test_hashtable.py,sha256=-Zl-uppJbc9kwPN_ZlxJMA76yAQKsgGmQQWI8Y-sxaM,1182 +numpy/_core/tests/test_indexerrors.py,sha256=keWclNvFu3ukhVSXc97w3bJM8BvkOpul6kjNudf1F2Q,4858 +numpy/_core/tests/test_indexing.py,sha256=jbYs0Mdj_4w1XRBnrBCbUzZR9o1vdT3qHvo0YE3-yas,56741 +numpy/_core/tests/test_item_selection.py,sha256=zaGuMcTDsbCpQO1k9c9xuc4jUWhbArfn_1INfilf9hk,6623 +numpy/_core/tests/test_limited_api.py,sha256=oz7wOz7VRbrsP_60SaCiMl69GZlz-3J4b6S_9GsjF7A,3404 
+numpy/_core/tests/test_longdouble.py,sha256=kcu2DpPuw-j0on0INw-LNMOjw4wuXI_fPbvn-9n-Oks,14285 +numpy/_core/tests/test_machar.py,sha256=z0mwyf6ASFI-gtMemFAag-8eEXKjb12mZ1BSpLYA52Q,1099 +numpy/_core/tests/test_mem_overlap.py,sha256=fZMHusU29yuYAdMqkmLcfj209q8xjaY23IxwBPSUnoE,30071 +numpy/_core/tests/test_mem_policy.py,sha256=Avw90zmQ5zjIvecpG0hV50UcKMaxVYkbmWQSdNuT6iA,17109 +numpy/_core/tests/test_memmap.py,sha256=4PvMpV7EpYuCAlPkO1s8TiME75_G_V1toBm0ADizLpY,8372 +numpy/_core/tests/test_multiarray.py,sha256=Su3uKM4LhZuMRhn8kyrwGqg-CZe93GRgaPgm0hrBZBs,402650 +numpy/_core/tests/test_multithreading.py,sha256=P6JP2x-YqSU6gnzLGtK2VJ1mWeoJP7i-zxPpU46EDxU,8899 +numpy/_core/tests/test_nditer.py,sha256=VrX91QX1nd3pWNM8MqxwYkDk_7swfuW0IACB86FBNVA,134550 +numpy/_core/tests/test_nep50_promotions.py,sha256=NbdzCpLbwWHWUXBk4JxM5FFIa-YqibgHV2rEawS3h2U,10354 +numpy/_core/tests/test_numeric.py,sha256=ol8-6PemWKQAGxpNGIgb-HjAwA13GU3IX4CBY84Wdms,162700 +numpy/_core/tests/test_numerictypes.py,sha256=hQ1YqasQ6mq--7fnKO08koqVrnsK2IwloWcdElKB7U4,23912 +numpy/_core/tests/test_overrides.py,sha256=_FsqndcyRN3r0JOp3Nn7_xAZPvGEypReo9PAdofeavY,28733 +numpy/_core/tests/test_print.py,sha256=HhOMC4roNrnXdncgpXPmFgsJWwcRpCc9u3KOIMSRxDw,7038 +numpy/_core/tests/test_protocols.py,sha256=19yXLJKXmy1-ArAclZ-F7PtgN6LmIHbiPvg6Vi2PkJk,1234 +numpy/_core/tests/test_records.py,sha256=beGD-yv67DC-eav0VNeGLh06uIMwKp3IDEV-i2KySN4,21074 +numpy/_core/tests/test_regression.py,sha256=5msZd_gdmBisnQrvKEysm-Nwjz84stp0n_c-Oqjmt3k,98058 +numpy/_core/tests/test_scalar_ctors.py,sha256=CrPYj6xo5A52VVqecc9S8Q0JQWPPyU2pND5KUNX_-pw,6923 +numpy/_core/tests/test_scalar_methods.py,sha256=CQARDMdU_T8LBg1sAdJ6PmRalpAK2CFSMH37AvLCmW0,9388 +numpy/_core/tests/test_scalarbuffer.py,sha256=0d8LgyIclgmi4wJM218frNDXa4V408McDFDkieBpJFo,5735 +numpy/_core/tests/test_scalarinherit.py,sha256=0JukiC7eR6NwWZgFy-YBmAXYIaA2BmudgY3Rt8ziX-I,2693 +numpy/_core/tests/test_scalarmath.py,sha256=2A6CgAaeFUEqUBo4beyWi6UBwA-eyaE_0Is364Rj3cQ,47796 
+numpy/_core/tests/test_scalarprint.py,sha256=7BJSHWTeVvKtzt_IUgCgYcSp8uwTxbUlplGmV2edNHE,21058 +numpy/_core/tests/test_shape_base.py,sha256=rqKjKS69o6NPEtqVUvRm4vc2zlrJ87QEy4PipMDlMXI,31842 +numpy/_core/tests/test_simd.py,sha256=1KRDlvrx6MGvBLcFvGESoN5DSxQq0GKcvZeSrtRFL1Y,49985 +numpy/_core/tests/test_simd_module.py,sha256=s22tqYtgN0x-5B3HTXiGfIV2aTXyQQH18c1fYj6VRhg,4004 +numpy/_core/tests/test_stringdtype.py,sha256=B8B5ZhCCwWAbMa0xKJ9k3R02nBrGys_NPDccDeOZeT4,59491 +numpy/_core/tests/test_strings.py,sha256=Hvo2dgAn4ViPIL0ej_S6PRCM-0UG3gT2K4uYmCqhhk4,53076 +numpy/_core/tests/test_ufunc.py,sha256=CthK8X2cvLRuASHDY4N1OHgqcN1pp5s1JZnd0VBA52U,135606 +numpy/_core/tests/test_umath.py,sha256=jgOcWeFjvQElbDJHdW0Eb1_IynBS2Kqp3Y2hpDmfUzM,198085 +numpy/_core/tests/test_umath_accuracy.py,sha256=ZW-NBEcRBWtbjzhPmk9fSpN3skQBuMgEoHS87zLmedk,5593 +numpy/_core/tests/test_umath_complex.py,sha256=ZRnJuFo6DQPz5tdUUZyHSamtaI2BFlLXzz6AtlILVIw,23912 +numpy/_core/tests/test_unicode.py,sha256=Y5VSmuMrpzuN9lNGB9gNDkUCl1c6qhiQRp2_kOc2V50,13221 +numpy/_core/umath.py,sha256=3_OTbmiMhaVNsxpH2xKV9l6za59rYXBry6gb1WgJaI0,2133 +numpy/_core/umath.pyi,sha256=9o4EBYeibP9abowHQHuo0iuhbUnfTWw5c8utNmKEduo,2840 +numpy/_distributor_init.py,sha256=ahBbZPz-mGZrmwx35FHQ26AiinST78FxvupiBBKGFp4,422 +numpy/_distributor_init.pyi,sha256=CSrbSp2YYxHTxlX7R0nT3RpH7EloB1wIvo7YOA7QWy8,28 +numpy/_expired_attrs_2_0.py,sha256=uPUSplSC6_x_NhynoAup3ZCf3ydt7MGrBNwJc2dBiL8,3983 +numpy/_expired_attrs_2_0.pyi,sha256=ZHjc6ZjYC1jKXXwLh4wylr6P1bYnlQ75sUigDNqaXoA,1332 +numpy/_globals.py,sha256=FWUxIto9hQ5Mi2NoxP6DeGpI3bgS8H9xq7jfzaVLtG0,3185 +numpy/_globals.pyi,sha256=kst3Vm7ZbznOtHsPya0PzU0KbjRGZ8xhMmTNMafvT-4,297 +numpy/_pyinstaller/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/_pyinstaller/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/_pyinstaller/__pycache__/__init__.cpython-310.pyc,, +numpy/_pyinstaller/__pycache__/hook-numpy.cpython-310.pyc,, 
+numpy/_pyinstaller/hook-numpy.py,sha256=GFGizYFjd9HsYMOtby7gew94CkvTrRW77ECGPNUgGGc,1429 +numpy/_pyinstaller/hook-numpy.pyi,sha256=2Bcwj2FwR3bRdtm26pmpUELEhsiZ58tQv9Q7_1Yp3HU,362 +numpy/_pyinstaller/tests/__init__.py,sha256=ZKqNjqlKw1pYiv57onbjDJnJdVrLawbZAcl-mPZzcSw,345 +numpy/_pyinstaller/tests/__pycache__/__init__.cpython-310.pyc,, +numpy/_pyinstaller/tests/__pycache__/pyinstaller-smoke.cpython-310.pyc,, +numpy/_pyinstaller/tests/__pycache__/test_pyinstaller.cpython-310.pyc,, +numpy/_pyinstaller/tests/pyinstaller-smoke.py,sha256=xt3dl_DjxuzVTPrqmVmMOZm5-24wBG2TxldQl78Xt1g,1175 +numpy/_pyinstaller/tests/test_pyinstaller.py,sha256=31zWlvlAC2sfhdew97x8aDvcYUaV3Tc_0CwFk8pgKaM,1170 +numpy/_pytesttester.py,sha256=6Ii-VI4uz3wiQ5pzNZKdvUT6LOoN868rNzVff7rTlAk,6525 +numpy/_pytesttester.pyi,sha256=Cy1rd-sv9DvmAAEKREy9VI0hYTWVpA_MoBRVmzDyvcY,515 +numpy/_typing/__init__.py,sha256=eXfdON-ITGAVjpprnjbZC9kvco7c-aAolc377D2lqWE,5201 +numpy/_typing/__pycache__/__init__.cpython-310.pyc,, +numpy/_typing/__pycache__/_add_docstring.cpython-310.pyc,, +numpy/_typing/__pycache__/_array_like.cpython-310.pyc,, +numpy/_typing/__pycache__/_char_codes.cpython-310.pyc,, +numpy/_typing/__pycache__/_dtype_like.cpython-310.pyc,, +numpy/_typing/__pycache__/_extended_precision.cpython-310.pyc,, +numpy/_typing/__pycache__/_nbit.cpython-310.pyc,, +numpy/_typing/__pycache__/_nbit_base.cpython-310.pyc,, +numpy/_typing/__pycache__/_nested_sequence.cpython-310.pyc,, +numpy/_typing/__pycache__/_scalars.cpython-310.pyc,, +numpy/_typing/__pycache__/_shape.cpython-310.pyc,, +numpy/_typing/__pycache__/_ufunc.cpython-310.pyc,, +numpy/_typing/_add_docstring.py,sha256=YPYjlxfqC8kXM_amtLyKXJ4aSNKJcmLY-It_PnW1-l4,4148 +numpy/_typing/_array_like.py,sha256=OQB9L3K3TYX6_PRCtvUfy8BgYR1vYCsIkgIqlCTlkkk,5757 +numpy/_typing/_callable.pyi,sha256=fTq5cTBoaRZwl3jFCk_G1eYSEyxHrpCR5-VoQJMMzZM,12176 +numpy/_typing/_char_codes.py,sha256=Qj3t7j_gjoy7ECmVZzz3b5nVMGJBKU5tjPZJ1peGozo,9000 
+numpy/_typing/_dtype_like.py,sha256=on_sUDNeC6eQ6Vrlsi4JSJAfxGmIj2M3zhOx0yJtN0Q,6213 +numpy/_typing/_extended_precision.py,sha256=5PhjET4NkRp-LSgffJqfcZ1C5Cp-xERB14FNXfUvRkU,804 +numpy/_typing/_nbit.py,sha256=4E8E67SkSewPvDR15I68KEOneF8gsc97mFEe9oYBcdQ,651 +numpy/_typing/_nbit_base.py,sha256=nN822ixIvBtkyDptX_LESrXoDZ4jjym5ph2FU6APEnk,2980 +numpy/_typing/_nested_sequence.py,sha256=CjG49p-dxretKeShOiyVvTqOoyM_mNyhXArIYY6nBh4,2697 +numpy/_typing/_scalars.py,sha256=sKaaEEZqAQtiEijeuH4U5KPNpG7FYsBtGO73l9dti9Q,1058 +numpy/_typing/_shape.py,sha256=3g0rNpZHxM7rPInBJMSGpbVD9Y0Lw1QtkFEN_yrWEeo,238 +numpy/_typing/_ufunc.py,sha256=SxToNG-O5NumF_yV7JRzAXloNbokV8B8JZC_EcbeNFk,160 +numpy/_typing/_ufunc.pyi,sha256=mhArbicCGnkzBdfyPxLwf6oyAPyR5T2C-jBOk9S3Vq8,27651 +numpy/_utils/__init__.py,sha256=mO41ldWPOHJkTkY-acmJ_8wM89IKRv2lWwUe_5XlUWo,3379 +numpy/_utils/__init__.pyi,sha256=Pmoon5FkvkkbGNr0Xzx6i6i8aoehSnLFlBXNmBRXwi0,769 +numpy/_utils/__pycache__/__init__.cpython-310.pyc,, +numpy/_utils/__pycache__/_convertions.cpython-310.pyc,, +numpy/_utils/__pycache__/_inspect.cpython-310.pyc,, +numpy/_utils/__pycache__/_pep440.cpython-310.pyc,, +numpy/_utils/_convertions.py,sha256=vetZFqC1qB-Z9jvc7RKuU_5ETOaSbjhbKa-sVwYV8TU,347 +numpy/_utils/_convertions.pyi,sha256=zkZfkdBk6-XcyD3zmr7E5sJbYasvyDCInUtWvrtjVhY,122 +numpy/_utils/_inspect.py,sha256=bSIacEhHLtYjTXaMVp1XFPY2IZfybb5bg8X5dYgc8JM,7626 +numpy/_utils/_inspect.pyi,sha256=H1QZ7zEgYyG9kwpfz8cEUhF_QfM96WVBoerQtJZNVDI,2326 +numpy/_utils/_pep440.py,sha256=y5Oppq3Kxn2dH3EWBYSENv_j8XjGUXWvNAiNCEJ-euI,14556 +numpy/_utils/_pep440.pyi,sha256=LdpDFW8iIj_bLbuTbvRr2XWmC9YS9lrpzLR7efqL2GU,3991 +numpy/char/__init__.py,sha256=oQZSAOs7rHme6CxfdL9nraYRNI3NU18MjzQ4kQmK2kA,95 +numpy/char/__init__.pyi,sha256=wolX_qE2bjsIcUfQrQzGjzkaqdMtuGWOVDA3q-2Jqj0,1650 +numpy/char/__pycache__/__init__.cpython-310.pyc,, +numpy/compat/__init__.py,sha256=oqsQeYKpQuJpuTLqMkZX6ssqQfSXs0Joj_S8Ms9KSNU,756 +numpy/compat/__pycache__/__init__.cpython-310.pyc,, 
+numpy/compat/__pycache__/py3k.cpython-310.pyc,, +numpy/compat/py3k.py,sha256=wcSRGrTokLPxLamRFwBnsWS9z5uAyzsMlTEnCWUqpWw,3946 +numpy/compat/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/compat/tests/__pycache__/__init__.cpython-310.pyc,, +numpy/conftest.py,sha256=gViKWIQaPzu9tKZPTt6rIv3LxtvEe9nxrUgPZVDE8UY,8978 +numpy/core/__init__.py,sha256=_lpcaIqNg3TH53JE0JKVKD4X0DOTki2dSvQgjHj6Eek,1307 +numpy/core/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/core/__pycache__/__init__.cpython-310.pyc,, +numpy/core/__pycache__/_dtype.cpython-310.pyc,, +numpy/core/__pycache__/_dtype_ctypes.cpython-310.pyc,, +numpy/core/__pycache__/_internal.cpython-310.pyc,, +numpy/core/__pycache__/_multiarray_umath.cpython-310.pyc,, +numpy/core/__pycache__/_utils.cpython-310.pyc,, +numpy/core/__pycache__/arrayprint.cpython-310.pyc,, +numpy/core/__pycache__/defchararray.cpython-310.pyc,, +numpy/core/__pycache__/einsumfunc.cpython-310.pyc,, +numpy/core/__pycache__/fromnumeric.cpython-310.pyc,, +numpy/core/__pycache__/function_base.cpython-310.pyc,, +numpy/core/__pycache__/getlimits.cpython-310.pyc,, +numpy/core/__pycache__/multiarray.cpython-310.pyc,, +numpy/core/__pycache__/numeric.cpython-310.pyc,, +numpy/core/__pycache__/numerictypes.cpython-310.pyc,, +numpy/core/__pycache__/overrides.cpython-310.pyc,, +numpy/core/__pycache__/records.cpython-310.pyc,, +numpy/core/__pycache__/shape_base.cpython-310.pyc,, +numpy/core/__pycache__/umath.cpython-310.pyc,, +numpy/core/_dtype.py,sha256=PcSCn7DCpgrvBjm-k4eCMcEiTnH-jPzQmh8FyzLVw9I,331 +numpy/core/_dtype.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/core/_dtype_ctypes.py,sha256=eiZNKCJbzZ1Ei9Tkd7Fffx8vWUsAKnFSK-5vza3vmEQ,359 +numpy/core/_dtype_ctypes.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/core/_internal.py,sha256=HC1NrqDEgK-6M1M6-8ZTZSZF7xnIYPh_G_4j2BFBNLM,972 +numpy/core/_multiarray_umath.py,sha256=vO49_4x5SYg-BST541l73RmBm7pkqbwlssmwsRSdU80,2151 
+numpy/core/_utils.py,sha256=dAaZtXVWhOEFiwmVsz8Mn77HsynMDKhZ7HkrjD1Q3vc,944 +numpy/core/arrayprint.py,sha256=qo9GIfdEmW9foxvP0vtFLRaAlSbOoOGJU-hBlQ5hIlA,347 +numpy/core/defchararray.py,sha256=-gCjc9ciILhSzAxtVXgiTwdpuNMD3R6p9tXHe_MLx9A,355 +numpy/core/einsumfunc.py,sha256=LkCSjRQ3HIF4fdRz7uEgl-1TyeT0gtGV5y8x9cQYsZ0,347 +numpy/core/fromnumeric.py,sha256=iQsih718r6QW80auPJbva99qeWfT5IK2S02sv4AFMUs,351 +numpy/core/function_base.py,sha256=V_-tUGZfgjYzjZxvhLNRtVXV2_v12rJsvAGpDXbfq8w,359 +numpy/core/getlimits.py,sha256=SQsTlDpDVz9AvFC-xvAJbhcm5svBD02qpE-HLgt17RA,343 +numpy/core/multiarray.py,sha256=2K7g3jXbH7wqupSsyr5wP0YoQSpXlZab9uDDbJtz2Bk,816 +numpy/core/numeric.py,sha256=nTvwcwAqkzCnYmqEt4J3dvqUodzXUlaI8H5YF5x65xg,370 +numpy/core/numerictypes.py,sha256=jmQ9c1WrWxlx8ODDZKOAqrixUu3Gx_NJD1SzT3wtb50,355 +numpy/core/overrides.py,sha256=Dq-lTb829gvg-HfRtY0BE6GE2UbI6iXkMIh8Gvkzt1g,343 +numpy/core/overrides.pyi,sha256=HScieJk23k4Lk14q8u9CEc3ZEVOQ6hGu_FeWDR2Tyu8,532 +numpy/core/records.py,sha256=5jPtgEtHaJ642Ct-G9uEwnF9y_TZnZAUXm_EUJEF8J8,335 +numpy/core/shape_base.py,sha256=itirz4hN3M8Ndgij4_ZVcra4qtRkK42Owp8qr9fFe5w,347 +numpy/core/umath.py,sha256=09uNybUqfWxdqkoYHzv6jrTDCXq6DDI-EdwaOKdijn4,327 +numpy/ctypeslib.py,sha256=9ejyo77Qqd54f9j7pRQQaABYAfVxjWfgKvia88T9hP4,19438 +numpy/ctypeslib.pyi,sha256=7CY_Na2E0uwZ88TzJ3pasogyLIr0wd8scdcxY0LK21A,8338 +numpy/distutils/__init__.py,sha256=sh1TV9_aW0YWvmHfBPtbZKCRcZTN6BnxKV-mIAG2vuY,2138 +numpy/distutils/__init__.pyi,sha256=6KiQIH85pUXaIlow3KW06e1_ZJBocVY6lIGghNaW33A,123 +numpy/distutils/__pycache__/__init__.cpython-310.pyc,, +numpy/distutils/__pycache__/_shell_utils.cpython-310.pyc,, +numpy/distutils/__pycache__/armccompiler.cpython-310.pyc,, +numpy/distutils/__pycache__/ccompiler.cpython-310.pyc,, +numpy/distutils/__pycache__/ccompiler_opt.cpython-310.pyc,, +numpy/distutils/__pycache__/conv_template.cpython-310.pyc,, +numpy/distutils/__pycache__/conv_template.cpython-310.pyc,sha256=fyHEvzWlDsUAMPJilcJGYRmrErHEWK91SC_qL7i2ACQ,8278 
+numpy/distutils/__pycache__/core.cpython-310.pyc,, +numpy/distutils/__pycache__/cpuinfo.cpython-310.pyc,, +numpy/distutils/__pycache__/exec_command.cpython-310.pyc,, +numpy/distutils/__pycache__/extension.cpython-310.pyc,, +numpy/distutils/__pycache__/from_template.cpython-310.pyc,, +numpy/distutils/__pycache__/fujitsuccompiler.cpython-310.pyc,, +numpy/distutils/__pycache__/intelccompiler.cpython-310.pyc,, +numpy/distutils/__pycache__/lib2def.cpython-310.pyc,, +numpy/distutils/__pycache__/line_endings.cpython-310.pyc,, +numpy/distutils/__pycache__/log.cpython-310.pyc,, +numpy/distutils/__pycache__/mingw32ccompiler.cpython-310.pyc,, +numpy/distutils/__pycache__/misc_util.cpython-310.pyc,, +numpy/distutils/__pycache__/msvc9compiler.cpython-310.pyc,, +numpy/distutils/__pycache__/msvccompiler.cpython-310.pyc,, +numpy/distutils/__pycache__/npy_pkg_config.cpython-310.pyc,, +numpy/distutils/__pycache__/numpy_distribution.cpython-310.pyc,, +numpy/distutils/__pycache__/pathccompiler.cpython-310.pyc,, +numpy/distutils/__pycache__/system_info.cpython-310.pyc,, +numpy/distutils/__pycache__/unixccompiler.cpython-310.pyc,, +numpy/distutils/_shell_utils.py,sha256=TDc8sp986sdmW06JwOaIaN5XVqG2t4HEfs8SdCpwU50,2625 +numpy/distutils/armccompiler.py,sha256=6sKNp543q_4NafErHoFOPKz8R3YJR9soDCr1WeFr5Xk,988 +numpy/distutils/ccompiler.py,sha256=TFzGS6MmE2JSChohLSvJ955mtV1339u7gfFar1O4seI,29516 +numpy/distutils/ccompiler_opt.py,sha256=6lKyYwOGGBNYjzSznBwnTyW4fBAfwlFw2nSkSvPPozI,103064 +numpy/distutils/checks/cpu_asimd.c,sha256=Nit4NvYvo3XWtBKeV6rmIszdNLu9AY81sqMFCTkKXBE,845 +numpy/distutils/checks/cpu_asimddp.c,sha256=bQP32IzQZANu9aFu3qkovLYJXKCm0bJ6srsO5Ho2GKI,448 +numpy/distutils/checks/cpu_asimdfhm.c,sha256=xJjmEakgtmK9zlx2fIT6UZ4eZreLzdCoOVkkGPyzXFA,548 +numpy/distutils/checks/cpu_asimdhp.c,sha256=0eTZ2E1Gyk3G5XfkpSN32yI9AC3SUwwFetyAOtEp5u4,394 +numpy/distutils/checks/cpu_avx.c,sha256=69aCE28EArV-BmdFKhCA5djgNZAZtQg2zdea3VQD-co,799 
+numpy/distutils/checks/cpu_avx2.c,sha256=207hFoh4ojzMAPQ53ug_Y5qCFIgZ1e8SdI1-o2jzdB4,769 +numpy/distutils/checks/cpu_avx512_clx.c,sha256=CfPjudkRZ9_xygLVOySSEjoAfkjjfu4ipkWK4uCahbU,864 +numpy/distutils/checks/cpu_avx512_cnl.c,sha256=eKCPRk6p1B0bPAyOY0oWRKZMfa-c5g-skvJGGlG5I4Y,972 +numpy/distutils/checks/cpu_avx512_icl.c,sha256=Zt8XOXZL85Ds5HvZlAwUVilT6mGbPU44Iir44ul6y2Y,1030 +numpy/distutils/checks/cpu_avx512_knl.c,sha256=0itGNg9s9gFjsj79qQvsZR-xceTTcpw4qa0OOAmq_Sg,984 +numpy/distutils/checks/cpu_avx512_knm.c,sha256=iVdJnZ5HY59XhUv4GzwqYRwz2E_jWJnk1uSz97MvxY0,1162 +numpy/distutils/checks/cpu_avx512_skx.c,sha256=aOHpYdGPEx2FcnC7TKe9Nr7wQ0QWW20Uq3xRVSb4U90,1036 +numpy/distutils/checks/cpu_avx512_spr.c,sha256=ziSmzNQZ_k3j5FrAWSKfAAW_g3l8tq8t6InVPWEUx9Y,930 +numpy/distutils/checks/cpu_avx512cd.c,sha256=zIl7AJXfxqnquZyHQvUAGr9M-vt62TIlylhdlrg-qkE,779 +numpy/distutils/checks/cpu_avx512f.c,sha256=ibW0zon6XGYkdfnYETuPfREmE5OtO0HfuLTqXMsoqNA,775 +numpy/distutils/checks/cpu_f16c.c,sha256=QxxI3vimUAkJ4eJ83va2mZzTJOk3yROI05fVY07H5To,890 +numpy/distutils/checks/cpu_fma3.c,sha256=Cq0F_UpVJ4SYHcxXfaYoqHSYvWRJzZsB8IkOVl8K2ro,839 +numpy/distutils/checks/cpu_fma4.c,sha256=Xy0YfVpQDCiFOOrCWH-RMkv7ms5ZAbSauwm2xEOT94o,314 +numpy/distutils/checks/cpu_neon.c,sha256=I-R8DHE6JfzqmPpaF4NTdWxq5hEW-lJZPjSjW8ynFgo,619 +numpy/distutils/checks/cpu_neon_fp16.c,sha256=6hdykX7cRL3ruejgK3bf_IXGQWol8OUITPEjvbz_1Hc,262 +numpy/distutils/checks/cpu_neon_vfpv4.c,sha256=IY4cT03GTrzEZKLd7UInKtYC0DlgugFGGrkSTfwwvmU,630 +numpy/distutils/checks/cpu_popcnt.c,sha256=Jkslm5DiuxbI-fBcCIgJjxjidm-Ps_yfAb_jJIZonE8,1081 +numpy/distutils/checks/cpu_rvv.c,sha256=hXM8c3JEjDRSf1vn3IWG0VSuno7QLrUlTegVvXXpYG4,313 +numpy/distutils/checks/cpu_sse.c,sha256=XitLZu_qxXDINNpbfcUAL7iduT1I63HjNgtyE72SCEo,706 +numpy/distutils/checks/cpu_sse2.c,sha256=OJpQzshqCS6Cp9X1I1yqh2ZPa0b2AoSmJn6HdApOzYk,717 +numpy/distutils/checks/cpu_sse3.c,sha256=AmZkvTpXcoCAfVckXgvwloutI5CTHkwHJD86pYsntgk,709 
+numpy/distutils/checks/cpu_sse41.c,sha256=5GvpgxPcDL39iydUjKyS6WczOiXTs14KeXvlWVOr6LQ,695 +numpy/distutils/checks/cpu_sse42.c,sha256=8eYzhquuXjRRGp3isTX0cNUV3pXATEPc-J-CDYTgTaU,712 +numpy/distutils/checks/cpu_ssse3.c,sha256=QXWKRz5fGQv5bn282bJL4h_92-yqHFG_Gp5uLKvcA34,725 +numpy/distutils/checks/cpu_sve.c,sha256=QgBJTJ_cTDz85ZLSMU7cQbpaiv8Bwb6Ma1HfCoX3l5c,301 +numpy/distutils/checks/cpu_vsx.c,sha256=gxWpdnkMeoaBCzlU_j56brB38KFo4ItFsjyiyo3YrKk,499 +numpy/distutils/checks/cpu_vsx2.c,sha256=ycKoKXszrZkECYmonzKd7TgflpZyVc1Xq-gtJqyPKxs,276 +numpy/distutils/checks/cpu_vsx3.c,sha256=pNA4w2odwo-mUfSnKnXl5SVY1z2nOxPZZcNC-L2YX1w,263 +numpy/distutils/checks/cpu_vsx4.c,sha256=SROYYjVVc8gPlM4ERO--9Dk2MzvAecZzJxGKO_RTvPM,319 +numpy/distutils/checks/cpu_vx.c,sha256=v1UZMj78POCN7sbFmW6N0GM_qQSUwHxiF15LQYADIUs,477 +numpy/distutils/checks/cpu_vxe.c,sha256=1w8AvS6x8s_zTgcrDEGMKQmSqpJRX2NLprdSu_ibyjk,813 +numpy/distutils/checks/cpu_vxe2.c,sha256=fY9P2fWo-b08dy4dmuNNc_xX3E0ruPRU9zLPzzgD-Z8,645 +numpy/distutils/checks/cpu_xop.c,sha256=sPhOvyT-mdlbf6RlbZvMrslRwHnTFgP-HXLjueS7nwU,246 +numpy/distutils/checks/extra_avx512bw_mask.c,sha256=7IRO24mpcuXRhm3refGWP91sy0e6RmSkmUQCWyxy__0,654 +numpy/distutils/checks/extra_avx512dq_mask.c,sha256=jFtOKEtZl3iTpfbmFNB-u4DQNXXBST2toKCpxFIjEa0,520 +numpy/distutils/checks/extra_avx512f_reduce.c,sha256=hIcCLMm_aXPfrhzCsoFdQiryIrntPqfDxz0tNOR985w,1636 +numpy/distutils/checks/extra_vsx3_half_double.c,sha256=GU-E6yQLdzmOdvO06D0KCkvU4YHyuwFvyydirU_1Clk,366 +numpy/distutils/checks/extra_vsx4_mma.c,sha256=-Pz_qQ55WfWmTWGTH0hvKrFTU2S2kjsVBfIK3w5sciE,520 +numpy/distutils/checks/extra_vsx_asm.c,sha256=anSZskhKZImNk0lsSJJY_8GJQ0h3dDrkrmrGitlS7Fw,981 +numpy/distutils/checks/test_flags.c,sha256=7rgVefVOKOBaefG_6riau_tT2IqI4MFrbSMGNFnqUBQ,17 +numpy/distutils/command/__init__.py,sha256=DCxnKqTLrauOD3Fc8b7qg9U3gV2k9SADevE_Q3H78ng,1073 +numpy/distutils/command/__pycache__/__init__.cpython-310.pyc,, +numpy/distutils/command/__pycache__/autodist.cpython-310.pyc,, 
+numpy/distutils/command/__pycache__/bdist_rpm.cpython-310.pyc,, +numpy/distutils/command/__pycache__/build.cpython-310.pyc,, +numpy/distutils/command/__pycache__/build_clib.cpython-310.pyc,, +numpy/distutils/command/__pycache__/build_ext.cpython-310.pyc,, +numpy/distutils/command/__pycache__/build_py.cpython-310.pyc,, +numpy/distutils/command/__pycache__/build_scripts.cpython-310.pyc,, +numpy/distutils/command/__pycache__/build_src.cpython-310.pyc,, +numpy/distutils/command/__pycache__/config.cpython-310.pyc,, +numpy/distutils/command/__pycache__/config_compiler.cpython-310.pyc,, +numpy/distutils/command/__pycache__/develop.cpython-310.pyc,, +numpy/distutils/command/__pycache__/egg_info.cpython-310.pyc,, +numpy/distutils/command/__pycache__/install.cpython-310.pyc,, +numpy/distutils/command/__pycache__/install_clib.cpython-310.pyc,, +numpy/distutils/command/__pycache__/install_data.cpython-310.pyc,, +numpy/distutils/command/__pycache__/install_headers.cpython-310.pyc,, +numpy/distutils/command/__pycache__/sdist.cpython-310.pyc,, +numpy/distutils/command/autodist.py,sha256=i2ip0Zru8_AFx3lNQhlZfj6o_vg-RQ8yu1WNstcIYhE,3866 +numpy/distutils/command/bdist_rpm.py,sha256=9uZfOzdHV0_PRUD8exNNwafc0qUqUjHuTDxQcZXLIbg,731 +numpy/distutils/command/build.py,sha256=6IbYgycGcCRrrWENUBqzAEhgtUhCGLnXNVnTCu3hxWc,2675 +numpy/distutils/command/build_clib.py,sha256=x8CjWbraTjai7wdSwq16VBWMQw5w20BmCj_iHdzDc14,19786 +numpy/distutils/command/build_ext.py,sha256=XfbdWZdqQKwqibpb8VT2ODlrcftrigfFVneLl97P3Zk,33735 +numpy/distutils/command/build_py.py,sha256=xBHZCtx91GqucanjIBETPeXmR-gyUKPDyr1iMx1ARWE,1175 +numpy/distutils/command/build_scripts.py,sha256=AEQLNmO2v5N-GXl4lwd8v_nHlrauBx9Y-UudDcdCs_A,1714 +numpy/distutils/command/build_src.py,sha256=njEPAEftbBAQ8K6uARjA1N_CkbCDwlB59p3wue5IfZg,31951 +numpy/distutils/command/config.py,sha256=IBU66VZXvuPfEYxMXImJpG8b0HW1UDlNBoLVrLyKLDA,21186 +numpy/distutils/command/config_compiler.py,sha256=SKLghaFsJl0uQTTdXh36NRtDii7Y8tI9jA6rtAMjlHw,4497 
+numpy/distutils/command/develop.py,sha256=5ro-Sudt8l58JpKvH9FauH6vIfYRv2ohHLz-9eHytbc,590 +numpy/distutils/command/egg_info.py,sha256=n6trbjRfD1qWc_hRtMFkOJsg82BCiLvdl-NeXyuceGc,946 +numpy/distutils/command/install.py,sha256=iK5ls63o6WqVOreU-mG5HZSkx90qYhMQvlo2FaaQWWg,3152 +numpy/distutils/command/install_clib.py,sha256=q3yrfJY9EBaxOIYUQoiu2-juNKLKAKKfXC0nrd4t6z0,1439 +numpy/distutils/command/install_data.py,sha256=r8EVbIaXyN3aOmRugT3kp_F4Z03PsVX2l_x4RjTOWU4,872 +numpy/distutils/command/install_headers.py,sha256=HZo3To_7tpls2ZomDnaxdP32oSUVQsFeCjbD8jDZXFY,945 +numpy/distutils/command/sdist.py,sha256=XQM39b-MMO08bfE3SJrrtDWwX0XVnzCZqfAoVuuaFuE,760 +numpy/distutils/conv_template.py,sha256=hL0DDy7tMJ-5I-63BmkWkoLNX2c5GiQdQhj-XNG3Tm8,9865 +numpy/distutils/core.py,sha256=4vvNzpLy_9AfakXgzC6OITRThJd4OdfSmrzxhYu49Fc,8388 +numpy/distutils/cpuinfo.py,sha256=l5G7myXNwEOTynBIEitH-ghaF8Zw5pHQAjaYpPKNtTQ,23322 +numpy/distutils/exec_command.py,sha256=B-iAz7woqzcwcP-Y2IUh3yOI_BLzseFFe0p0K9U5gPo,10597 +numpy/distutils/extension.py,sha256=U4vHJeem4kWsK_5KUnmp1qCG0qO6PI5yQjchUvHnwlw,3561 +numpy/distutils/fcompiler/__init__.py,sha256=UncOSqwlhHdNNSViIibqy51Prrkd589e1C06sTtnYww,41660 +numpy/distutils/fcompiler/__pycache__/__init__.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/absoft.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/arm.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/compaq.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/environment.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/fujitsu.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/g95.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/gnu.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/hpux.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/ibm.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/intel.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/lahey.cpython-310.pyc,, 
+numpy/distutils/fcompiler/__pycache__/mips.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/nag.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/none.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/nv.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/pathf95.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/pg.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/sun.cpython-310.pyc,, +numpy/distutils/fcompiler/__pycache__/vast.cpython-310.pyc,, +numpy/distutils/fcompiler/absoft.py,sha256=J5Nn8PXD0VNUjCI5Vj6PD8JRS6Dxi5Zz5LCa8fkPZIY,5672 +numpy/distutils/fcompiler/arm.py,sha256=Bpftt3HnmJc3Iyt8-nwsNv86JqdFYK0JMwh3CC8nP_k,2161 +numpy/distutils/fcompiler/compaq.py,sha256=yyReqFAq42dy1zscMAV0GqVaYW7Iao1HtAUpnv5XTec,4023 +numpy/distutils/fcompiler/environment.py,sha256=PVS1al3wahDNnneNVSl1sQhMPfz2dUXaIDVJfy0wZBU,3168 +numpy/distutils/fcompiler/fujitsu.py,sha256=g4dTLDFfLRAzhYayIwyHGBw1Y36DKtPOCYfA823ldNA,1379 +numpy/distutils/fcompiler/g95.py,sha256=1TJe4IynWYqqYBy8gJ-nz8WQ_TaSbv8k2UzUIY5Erqc,1372 +numpy/distutils/fcompiler/gnu.py,sha256=6V_Ly_lwEEsfUDSz0vCDg86EhWlajHuyBy_ioLqKCdM,21057 +numpy/distutils/fcompiler/hpux.py,sha256=SLbDOPYgiixqE32GgUrAJjpDLFy9g7E01vGNZCGv6Pc,1394 +numpy/distutils/fcompiler/ibm.py,sha256=P8NMedMGxlCvVRoVIj4GKF65IP1TUe7jmlt-1KscVYo,3631 +numpy/distutils/fcompiler/intel.py,sha256=rlm017cVcyjIy1_s8a4lNHJ8ilo6TiYcIA_tuPojapY,6781 +numpy/distutils/fcompiler/lahey.py,sha256=EV3Zhwq-iowWAu4BFBPv_UGJ-IB-qxlxmi6WU1qHDOs,1372 +numpy/distutils/fcompiler/mips.py,sha256=mlUNgGrRSLnNhtxQXWVfC9l4_OP2GMvOkgbZQwBon0A,1768 +numpy/distutils/fcompiler/nag.py,sha256=FpoDQWW_Y3Anm9-Psml-eNySCGzCp9_jP2Ej4_AwDy8,2864 +numpy/distutils/fcompiler/none.py,sha256=auMK2ou1WtJ20LeMbwCZJ3XofpT9A0YYbMVd-62Mi_E,786 +numpy/distutils/fcompiler/nv.py,sha256=40IYfxm5ppkYtSaX8seMg9NGynvXrZFkcLDonxbKfW4,1594 +numpy/distutils/fcompiler/pathf95.py,sha256=ipbaZIO8sqPJ1lUppOurnboiTwRzIasWNAJvKmktvv4,1094 
+numpy/distutils/fcompiler/pg.py,sha256=cVcSFM9oR0KmO5AIb4Odw9OGslW6zvDGP88n-uEwxvQ,3696 +numpy/distutils/fcompiler/sun.py,sha256=JMdFfKldTYlfW1DxV7nR09k5PZypKLWpP7wmQzmlnH0,1628 +numpy/distutils/fcompiler/vast.py,sha256=JUGP68JGOUOBS9WbXftE-qCVUD13fpLyPnhpHfTL5y0,1719 +numpy/distutils/from_template.py,sha256=BL-vypfI0GNJrTo-nKs445liTW2Qdfvrsu8RMjATL5A,8174 +numpy/distutils/fujitsuccompiler.py,sha256=JWVPhI1oH4v2iKzDP8VjcnJIKYXZFYcYCwdpDxhURvw,862 +numpy/distutils/intelccompiler.py,sha256=1ZN9JVEemp98S-kxlSjpRaqx-aUE1YmGgHy0mFQtrMc,4128 +numpy/distutils/lib2def.py,sha256=KnWZJaOsxmx57MEJxrsdPAlZbQBgu-27bSCjwO8cI6k,3746 +numpy/distutils/line_endings.py,sha256=hlI71r840mhfu8lmzdHPVZ4NFm-kJDDUMV3lETblVTY,2109 +numpy/distutils/log.py,sha256=a5-sPwcZei7kSP0ZQZH4tTrlRWHnL8jtzLCeUSPA_04,2990 +numpy/distutils/mingw/gfortran_vs2003_hack.c,sha256=FDTA53KYTIhil9ytvZlocOqghQVp9LacLHn1IurV0wI,83 +numpy/distutils/mingw32ccompiler.py,sha256=kgOSpzL8n_hXK8erym22LZ2pBvI38V7GwI_qk8mWLVw,22748 +numpy/distutils/misc_util.py,sha256=re8nNlckODPh9fFvDJflT-UeOV594mgRSX-IANr1I_o,91723 +numpy/distutils/msvc9compiler.py,sha256=bCtCVJmGrBHPm9sOoxa3oSrdrEVCNQFEM5O5hdqX8Hc,2255 +numpy/distutils/msvccompiler.py,sha256=gqQySO-P6Egk3qgrNlyCF3ze_U47lIO9SrbFJrCQCO8,2723 +numpy/distutils/npy_pkg_config.py,sha256=t2-OG_QrnZEeQsagpJF4sLN9C7RMlnWGOW4K88wEvx0,13459 +numpy/distutils/numpy_distribution.py,sha256=nrdp8rlyjEBBV1tzzi5cE-aYeXB5U3X8T5-G0akXSoY,651 +numpy/distutils/pathccompiler.py,sha256=a5CYDXilCaIC85v0fVh-wrb0fClv0A7mPS87aF1inUc,734 +numpy/distutils/system_info.py,sha256=LeXQf1Zvo40QB0dTwx3Skc2f7Pkv9RQou3sJtFeD8po,117171 +numpy/distutils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/distutils/tests/__pycache__/__init__.cpython-310.pyc,, +numpy/distutils/tests/__pycache__/test_build_ext.cpython-310.pyc,, +numpy/distutils/tests/__pycache__/test_ccompiler_opt.cpython-310.pyc,, +numpy/distutils/tests/__pycache__/test_ccompiler_opt_conf.cpython-310.pyc,, 
+numpy/distutils/tests/__pycache__/test_exec_command.cpython-310.pyc,, +numpy/distutils/tests/__pycache__/test_fcompiler.cpython-310.pyc,, +numpy/distutils/tests/__pycache__/test_fcompiler_gnu.cpython-310.pyc,, +numpy/distutils/tests/__pycache__/test_fcompiler_intel.cpython-310.pyc,, +numpy/distutils/tests/__pycache__/test_fcompiler_nagfor.cpython-310.pyc,, +numpy/distutils/tests/__pycache__/test_from_template.cpython-310.pyc,, +numpy/distutils/tests/__pycache__/test_log.cpython-310.pyc,, +numpy/distutils/tests/__pycache__/test_mingw32ccompiler.cpython-310.pyc,, +numpy/distutils/tests/__pycache__/test_misc_util.cpython-310.pyc,, +numpy/distutils/tests/__pycache__/test_npy_pkg_config.cpython-310.pyc,, +numpy/distutils/tests/__pycache__/test_shell_utils.cpython-310.pyc,, +numpy/distutils/tests/__pycache__/test_system_info.cpython-310.pyc,, +numpy/distutils/tests/__pycache__/utilities.cpython-310.pyc,, +numpy/distutils/tests/test_build_ext.py,sha256=VnrowSutoLRrv218BT4BQ4fZ-8x7Q4otFLyF6wVjugg,2853 +numpy/distutils/tests/test_ccompiler_opt.py,sha256=YAR76iKLsRIpRfS2XmKunsyHaiDzyGK-T47oNI7WmyE,29586 +numpy/distutils/tests/test_ccompiler_opt_conf.py,sha256=3KyqLepj3nC2C1UYm8nv1Ne5O6KtufD-7DlvAYJuvOo,6523 +numpy/distutils/tests/test_exec_command.py,sha256=EVipBhoXEJjlSwtQRptWJC1LNJc6wfYzu_81V2jdAL8,7612 +numpy/distutils/tests/test_fcompiler.py,sha256=SS5HOLIg0eqkmZTRKeWq9_ahW2tmV9c9piwYfzcBPmc,1320 +numpy/distutils/tests/test_fcompiler_gnu.py,sha256=RlRHZbyazgKGY17NmdYSF3ehO0M0xXN4UkbsJzJz4i8,2191 +numpy/distutils/tests/test_fcompiler_intel.py,sha256=4cppjLugoa8P4bjzYdiPxmyCywmP9plXOkfsklhnYsQ,1088 +numpy/distutils/tests/test_fcompiler_nagfor.py,sha256=ntyr8f-67dNI0OF_l6-aeTwu9wW-vnxpheqrc4cXAUI,1124 +numpy/distutils/tests/test_from_template.py,sha256=ZzUSEPyZIG4Zak3-TFqmRGXHMp58aKTuLKb0t-5XpDg,1147 +numpy/distutils/tests/test_log.py,sha256=ylfdL0kBkbjj_Tgqx47UGykAtpE_mJkLndL40p11AYc,902 
+numpy/distutils/tests/test_mingw32ccompiler.py,sha256=7X8V4hLMtsNj1pYoLkSSla04gJu66e87E_k-6ce3PrA,1651 +numpy/distutils/tests/test_misc_util.py,sha256=91koMzWbDZktEvkfdCByVHzViHHULmn0WRcA_D-YSjQ,3452 +numpy/distutils/tests/test_npy_pkg_config.py,sha256=1pQh-mApHjj0y9Ba2tqns79U8dsfDpJ9zcPdsa2qbps,2641 +numpy/distutils/tests/test_shell_utils.py,sha256=aKtyXpHEYARNsAq9q5SeVC0qqMfm1gzvlN6-nXOVlac,2193 +numpy/distutils/tests/test_system_info.py,sha256=-j438GufVq6Vicimybm1XxndwwiXGKuYTEb78gfY5Ws,11739 +numpy/distutils/tests/utilities.py,sha256=d49suMzR_1sAXU0OO5kD7msJfBtmvv7yZZCCWIxXKY4,2377 +numpy/distutils/unixccompiler.py,sha256=ED_e7yHVNj4oXMze6KY8TbPxjyvHDC6o4VNGAkFA5ZQ,5567 +numpy/doc/__pycache__/ufuncs.cpython-310.pyc,, +numpy/doc/ufuncs.py,sha256=jMnfQhRknVIhgFVS9z2l5oYM8N1tuQtf5bXMBL449oI,5552 +numpy/dtypes.py,sha256=cPkS6BLRvpfsUzhd7Vk1L7_VcenWb1nuHuCxc9fYC4I,1353 +numpy/dtypes.pyi,sha256=9Gys5OIDUcglbDCgnJqubAljutcy_NUtmqWUy9-rRB0,15787 +numpy/exceptions.py,sha256=8or6nB2di0rsXpxLrmoUI4nH5bsyAIInsBfMDYL1RS8,8085 +numpy/exceptions.pyi,sha256=baBkfJ_DQdH6AH7roIXq8JSlY5Wn4z_hdJVbo_1SQUE,776 +numpy/f2py/__init__.py,sha256=WZXe6JMmUBaRuBtosCRzno0roeUj8CEoQw9g2_RRokc,2590 +numpy/f2py/__init__.pyi,sha256=0_-xXhZztqkodDS2UJTGZAdLO8JkzE7LMJYeDZa46cY,1103 +numpy/f2py/__main__.py,sha256=TDesy_2fDX-g27uJt4yXIXWzSor138R2t2V7HFHwqAk,135 +numpy/f2py/__pycache__/__init__.cpython-310.pyc,, +numpy/f2py/__pycache__/__main__.cpython-310.pyc,, +numpy/f2py/__pycache__/__version__.cpython-310.pyc,, +numpy/f2py/__pycache__/_isocbind.cpython-310.pyc,, +numpy/f2py/__pycache__/_src_pyf.cpython-310.pyc,, +numpy/f2py/__pycache__/auxfuncs.cpython-310.pyc,, +numpy/f2py/__pycache__/capi_maps.cpython-310.pyc,, +numpy/f2py/__pycache__/cb_rules.cpython-310.pyc,, +numpy/f2py/__pycache__/cfuncs.cpython-310.pyc,, +numpy/f2py/__pycache__/common_rules.cpython-310.pyc,, +numpy/f2py/__pycache__/crackfortran.cpython-310.pyc,, +numpy/f2py/__pycache__/diagnose.cpython-310.pyc,, 
+numpy/f2py/__pycache__/f2py2e.cpython-310.pyc,, +numpy/f2py/__pycache__/f90mod_rules.cpython-310.pyc,, +numpy/f2py/__pycache__/func2subr.cpython-310.pyc,, +numpy/f2py/__pycache__/rules.cpython-310.pyc,, +numpy/f2py/__pycache__/symbolic.cpython-310.pyc,, +numpy/f2py/__pycache__/use_rules.cpython-310.pyc,, +numpy/f2py/__version__.py,sha256=TisKvgcg4vh5Fptw2GS1JB_3bAQsWZIKhclEX6ZcAho,35 +numpy/f2py/_backends/__init__.py,sha256=xIVHiF-velkBDPKwFS20PSg-XkFW5kLAVj5CSqNLddM,308 +numpy/f2py/_backends/__pycache__/__init__.cpython-310.pyc,, +numpy/f2py/_backends/__pycache__/_backend.cpython-310.pyc,, +numpy/f2py/_backends/__pycache__/_distutils.cpython-310.pyc,, +numpy/f2py/_backends/__pycache__/_meson.cpython-310.pyc,, +numpy/f2py/_backends/_backend.py,sha256=9RZDu4FCwCM7G39EX2YEt-Vnaz0U2WSp-QSAfz11BGE,1233 +numpy/f2py/_backends/_distutils.py,sha256=e3dqC9ddmppsCNhLngtOE3Z6WZnLfaG_N5xiIcHPVWI,2459 +numpy/f2py/_backends/_meson.py,sha256=GD5pv3ilTRjtU4wGWgWrakg4nFySOiaX4NdmgO3egYM,8322 +numpy/f2py/_backends/meson.build.template,sha256=6XD3j-K5pc1P_icgUWkrgEsyludQWsqS5rb6UB29tH0,1654 +numpy/f2py/_isocbind.py,sha256=QVoR_pD_bY9IgTaSHHUw_8EBg0mkaf3JZfwhLfHbz1Q,2422 +numpy/f2py/_src_pyf.py,sha256=3swmQKGTeQGVMLzTTkZqZHHQ5EP6RT2LjgaUnXv0S74,7904 +numpy/f2py/auxfuncs.py,sha256=5tF_ZvesfJDTmh-1Pq7NgV7ArDtD2aOGhwbb4VZtraE,28020 +numpy/f2py/capi_maps.py,sha256=Hj1g5T5Siyc4JWSZJPnbfXqPTCqoXblwiDET04UBh4k,31428 +numpy/f2py/cb_rules.py,sha256=hALemKsqa1qkTD2KqBcdGmRDhSTAuq1Z5ZsPlJjWdXw,25648 +numpy/f2py/cfuncs.py,sha256=qbuF9fJWlhVSZ3xIstFwrGWM7FO1Zy0DUzBk6HD11ik,54036 +numpy/f2py/common_rules.py,sha256=19VDEPQ9-Pzzknv03U23gWYesmDAzJrGxwdXqn7CxhQ,5277 +numpy/f2py/crackfortran.py,sha256=DnaLeMI4wdc9cUAUMSuCb_2_ZAzWuQfbi8puLeRNbeU,151841 +numpy/f2py/diagnose.py,sha256=0DtPTDjxbFUu0F_nDHfsD0vlCgnRhf8WZ1kHsXVWcpE,5351 +numpy/f2py/f2py2e.py,sha256=36qdKKlXxLiwFZoDwA9sYZMxH6IzoPY9alB8ZajnxDY,29621 +numpy/f2py/f90mod_rules.py,sha256=Q-e9Q79dkOvEBLoJDLTf7nX7WbtPf-qt4pbRI41kLYw,10144 
+numpy/f2py/func2subr.py,sha256=Wro0C3NGSO-1g2zxBI8qg_Tl6KyczrCtCTJvhN4KtUQ,10621 +numpy/f2py/rules.py,sha256=lQjZ-e0LAArXNmso5c6H_IqXZiDdmcmmgmV3tztJ4UI,64516 +numpy/f2py/setup.cfg,sha256=828sy3JvJmMzVxLkC-y0lxcEMaDTnMc3l9dWqP4jYng,50 +numpy/f2py/src/fortranobject.c,sha256=R7AJfWjQiz2dLylWtFpvZByWvu9OCkG4UCkMa3t-jxw,47472 +numpy/f2py/src/fortranobject.h,sha256=uCcHO8mjuANlKb3c7YAZwM4pgT0CTaXWLYqgE27Mnt0,5996 +numpy/f2py/symbolic.py,sha256=BI5m8j7wEpq1u9yTDUBUtqUCCH1JBVVxyEFZRMjGWlA,54771 +numpy/f2py/tests/__init__.py,sha256=hiQX1lvI7rIYRNecVpg5D_0N6E0w94BSmexhEErutmI,343 +numpy/f2py/tests/__pycache__/__init__.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_abstract_interface.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_block_docstring.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_callback.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_character.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_common.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_crackfortran.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_data.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_docs.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_f2cmap.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_f2py2e.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_isoc.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_kind.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_mixed.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_modules.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_parameter.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_pyf_src.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_quoted_character.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_regression.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_return_character.cpython-310.pyc,, 
+numpy/f2py/tests/__pycache__/test_return_complex.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_return_integer.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_return_logical.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_return_real.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_routines.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_size.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_string.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_symbolic.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/test_value_attrspec.cpython-310.pyc,, +numpy/f2py/tests/__pycache__/util.cpython-310.pyc,, +numpy/f2py/tests/src/abstract_interface/foo.f90,sha256=aCaFEqfXp79pVXnTFtjZBWUY_5pu8wsehp1dEauOkSE,692 +numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90,sha256=y3R2dDn0BUz-0bMggfT1jwXbhz_gniz7ONMpureEQew,111 +numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c,sha256=0UkctY5oeFs9B9qnX8qhe3wTFZA_mF-FBBkJoy_iuQg,7713 +numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap,sha256=zfuOShmuotzcLIQDnVFaARwvM66iLrOYzpquIGDbiKU,30 +numpy/f2py/tests/src/assumed_shape/foo_free.f90,sha256=fqbSr7VlKfVrBulFgQtQA9fQf0mQvVbLi94e4FTST3k,494 +numpy/f2py/tests/src/assumed_shape/foo_mod.f90,sha256=9pbi88-uSNP5IwS49Kim982jDAuopo3tpEhg2SOU7no,540 +numpy/f2py/tests/src/assumed_shape/foo_use.f90,sha256=9Cl1sdrihB8cCSsjoQGmOO8VRv9ni8Fjr0Aku1UdEWM,288 +numpy/f2py/tests/src/assumed_shape/precision.f90,sha256=3L_F7n5ju9F0nxw95uBUaPeuiDOw6uHvB580eIj7bqI,134 +numpy/f2py/tests/src/block_docstring/foo.f,sha256=KVTeqSFpI94ibYIVvUW6lOQ9T2Bx5UzZEayP8Maf2H0,103 +numpy/f2py/tests/src/callback/foo.f,sha256=rLqaaaUpWFTaGVxNoGERtDKGCa5dLCTW5DglsFIx-wU,1316 +numpy/f2py/tests/src/callback/gh17797.f90,sha256=-_NvQK0MzlSR72PSuUE1FeUzzsMBUcPKsbraHIF7O24,155 +numpy/f2py/tests/src/callback/gh18335.f90,sha256=n_Rr99cI7iHBEPV3KGLEt0QKZtItEUKDdQkBt0GKKy4,523 
+numpy/f2py/tests/src/callback/gh25211.f,sha256=ejY_ssadbZQfD5_-Xnx_ayzWXWLjkdy7DGp6C_uCUCY,189 +numpy/f2py/tests/src/callback/gh25211.pyf,sha256=nrzvt2QHZRCcugg0R-4FDMMl1MJmWCOAjR7Ta-pXz7Y,465 +numpy/f2py/tests/src/callback/gh26681.f90,sha256=ykwNXWyja5FfZk1bPihbYiMmMlbKhRPoPKva9dNFtLM,584 +numpy/f2py/tests/src/cli/gh_22819.pyf,sha256=e3zYjFmiOxzdXoxzgkaQ-CV6sZ1t4aKugyhqRXmBNdQ,148 +numpy/f2py/tests/src/cli/hi77.f,sha256=bgBERF4EYxHlzJCvZCJOlEmUE1FIvipdmj4LjdmL_dE,74 +numpy/f2py/tests/src/cli/hiworld.f90,sha256=RncaEqGWmsH9Z8BMV-UmOTUyo3-e9xOQGAmNgDv6SfY,54 +numpy/f2py/tests/src/common/block.f,sha256=tcGKa42S-6bfA6fybpM0Su_xjysEVustkEJoF51o_pE,235 +numpy/f2py/tests/src/common/gh19161.f90,sha256=Vpb34lRVC96STWaJerqkDQeZf7mDOwWbud6pW62Tvm4,203 +numpy/f2py/tests/src/crackfortran/accesstype.f90,sha256=3ONHb4ZNx0XISvp8fArnUwR1W9rzetLFILTiETPUd80,221 +numpy/f2py/tests/src/crackfortran/common_with_division.f,sha256=JAzHD5aluoYw0jVGZjBYd1wTABU0PwNBD0cz3Av5AAk,511 +numpy/f2py/tests/src/crackfortran/data_common.f,sha256=rP3avnulWqJbGCFLWayjoFKSspGDHZMidPTurjz33Tc,201 +numpy/f2py/tests/src/crackfortran/data_multiplier.f,sha256=LaPXVuo5lX0gFZVh76Hc7LM1sMk9EBPALuXBnHAGdOA,202 +numpy/f2py/tests/src/crackfortran/data_stmts.f90,sha256=MAZ3gstsPqECk3nWQ5Ql-C5udrIv3sAciW1ZGTtHLts,713 +numpy/f2py/tests/src/crackfortran/data_with_comments.f,sha256=FUPluNth5uHgyKqjQW7HKmyWg4wDXj3XPJCIC9ZZuOs,183 +numpy/f2py/tests/src/crackfortran/foo_deps.f90,sha256=D9FT8Rx-mK2p8R6r4bWxxqgYhkXR6lNmPj2RXOseMpw,134 +numpy/f2py/tests/src/crackfortran/gh15035.f,sha256=0G9bmfVafpuux4-ZgktYZ6ormwrWDTOhKMK4wmiSZlQ,391 +numpy/f2py/tests/src/crackfortran/gh17859.f,sha256=acknjwoWYdA038oliYLjB4T1PHhXkKRLeJobIgB_Lbo,352 +numpy/f2py/tests/src/crackfortran/gh22648.pyf,sha256=xPnKx4RcT1568q-q_O83DYpCgVYJ8z4WQ-yLmHPchJA,248 +numpy/f2py/tests/src/crackfortran/gh23533.f,sha256=k2xjRpRaajMYpi5O-cldYPTZGFGB12PUGcj5Fm9joyk,131 
+numpy/f2py/tests/src/crackfortran/gh23598.f90,sha256=20ukdZXq-qU0Zxzt4W6cO8tRxlNlQ456zgD09zdozCE,105 +numpy/f2py/tests/src/crackfortran/gh23598Warn.f90,sha256=FvnIxy5fEOvzNb5WSkWzPk7yZ9yIv0yPZk9vNnS-83w,216 +numpy/f2py/tests/src/crackfortran/gh23879.f90,sha256=jELVfEGEF66z_Pv_iBHp3yGsGhadB0dnKCDtPcaz_CM,352 +numpy/f2py/tests/src/crackfortran/gh27697.f90,sha256=mTOEncxZlam6N-3I-IL0ua-iLkgqDrrVXNsE-7y7jAM,376 +numpy/f2py/tests/src/crackfortran/gh2848.f90,sha256=-IpkeTz0j9_lkQeN9mT7w3U1cAJjQxSMdAmyHdF8oVg,295 +numpy/f2py/tests/src/crackfortran/operators.f90,sha256=cb1JO2hIMCQejZO_UJWluBCP8LdXQbBJw2XN6YHB3JA,1233 +numpy/f2py/tests/src/crackfortran/privatemod.f90,sha256=9O2oWEquIUcbDB1wIzNeae3hx4gvXAoYW5tGfBt3KWk,185 +numpy/f2py/tests/src/crackfortran/publicmod.f90,sha256=nU_VXCKiniiUq_78KAWkXiN6oiMQh39emMxbgOVf9cg,177 +numpy/f2py/tests/src/crackfortran/pubprivmod.f90,sha256=-uz75kquU4wobaAPZ1DLKXJg6ySCZoDME1ce6YZ2q5Y,175 +numpy/f2py/tests/src/crackfortran/unicode_comment.f90,sha256=wDMoF7F7VFYdeocfTyWIh7noniEwExVb364HrhUSbSg,102 +numpy/f2py/tests/src/f2cmap/.f2py_f2cmap,sha256=fwszymaWhcWO296u5ThHW5yMAkFhB6EtHWqqpc9FAVI,83 +numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90,sha256=rphN_mmzjCCCkdPM0HjsiJV7rmxpo4GoCNp5qmBzv8U,307 +numpy/f2py/tests/src/isocintrin/isoCtests.f90,sha256=Oir0PfE3mErnUQ42aFxiqAkcYn3B6b1FHIPGipDdekg,1032 +numpy/f2py/tests/src/kind/foo.f90,sha256=6_zq3OAWsuNJ5ftGTQAEynkHy-MnuLgBXmMIgbvL7yU,367 +numpy/f2py/tests/src/mixed/foo.f,sha256=Zgn0xDhhzfas3HrzgVSxIL1lGEF2mFRVohrvXN1thU0,90 +numpy/f2py/tests/src/mixed/foo_fixed.f90,sha256=6eEEYCH71gPp6lZ6e2afLrfS6F_fdP7GZDbgGJJ_6ns,187 +numpy/f2py/tests/src/mixed/foo_free.f90,sha256=UC6iVRcm0-aVXAILE5jZhivoGQbKU-prqv59HTbxUJA,147 +numpy/f2py/tests/src/modules/gh25337/data.f90,sha256=EqMEuEV0_sx4XbFzftbU_6VfGtOw9Tbs0pm0eVEp2cA,188 +numpy/f2py/tests/src/modules/gh25337/use_data.f90,sha256=DChVLgD7qTOpbYNmfGjPjfOx5YsphMIYwdwnF12X4xM,185 
+numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90,sha256=MMLPSzBwuGS4UwCXws9djH11F5tG5xFLc80CDb4U9Mk,423 +numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90,sha256=1dJD1kDC_wwn7v_zF49D3n62T1x9wFxGKanQQz_VI7k,424 +numpy/f2py/tests/src/modules/module_data_docstring.f90,sha256=-asnMH7vZMwVIeMU2YiLWgYCUUUxZgPTpbAomgWByHs,236 +numpy/f2py/tests/src/modules/use_modules.f90,sha256=bveSAqXIZtd4NMlDfFei1ZlesFAa9An5LjkD-gDk2ms,418 +numpy/f2py/tests/src/negative_bounds/issue_20853.f90,sha256=IxBGWem-uv9eHgDhysEdGTmNKHR1gAiU7YJPo20eveM,164 +numpy/f2py/tests/src/parameter/constant_array.f90,sha256=fkYemwIBKsP63-FGKBW8mzOAp6k13eZOin8sQe1pyno,1513 +numpy/f2py/tests/src/parameter/constant_both.f90,sha256=L0rG6-ClvHx7Qsch46BUXRi_oIEL0uw5dpRHdOUQuv0,1996 +numpy/f2py/tests/src/parameter/constant_compound.f90,sha256=lAT76HcXGMgr1NfKof-RIX3W2P_ik1PPqkRdJ6EyBmM,484 +numpy/f2py/tests/src/parameter/constant_integer.f90,sha256=42jROArrG7vIag9wFa_Rr5DBnnNvGsrEUgpPU14vfIo,634 +numpy/f2py/tests/src/parameter/constant_non_compound.f90,sha256=u9MRf894Cw0MVlSOUbMSnFSHP4Icz7RBO21QfMkIl-Q,632 +numpy/f2py/tests/src/parameter/constant_real.f90,sha256=QoPgKiHWrwI7w5ctYZugXWzaQsqSfGMO7Jskbg4CLTc,633 +numpy/f2py/tests/src/quoted_character/foo.f,sha256=0zXQbdaqB9nB8R4LF07KDMFDbxlNdiJjVdR8Nb3nzIM,496 +numpy/f2py/tests/src/regression/AB.inc,sha256=ydjTVb6QEw1iYw2tRiziqqzWcDHrJsNWr3m51-rqFXQ,17 +numpy/f2py/tests/src/regression/assignOnlyModule.f90,sha256=vPJbhOlNsLrgN3su4ohHUSbxE4GGKU7SiJh7dhBvX3o,633 +numpy/f2py/tests/src/regression/datonly.f90,sha256=HuBLuEw0kNEplJ9TxxSNr7hLj-jx9ZNGaXC8iLm_kf8,409 +numpy/f2py/tests/src/regression/f77comments.f,sha256=FjP-07suTBdqgtwiENT04P-47UB4g9J5-20IQdXAHhM,652 +numpy/f2py/tests/src/regression/f77fixedform.f95,sha256=KdKFcAc3ZrID-h4nTOJDdEYfQzR2kkn9VqQCorfJGpM,144 +numpy/f2py/tests/src/regression/f90continuation.f90,sha256=VweFIi5-xxZhtgSOh8i_FjMPXu_od9qjrDHq6ma5X5k,285 
+numpy/f2py/tests/src/regression/incfile.f90,sha256=gq87H2CtCZUON9V5UzcK6x_fthnWDVuPFQLa0fece1M,97 +numpy/f2py/tests/src/regression/inout.f90,sha256=TlMxJjhjjiuLI--Tg2LshLnbfZpiKz37EpR_tPKKSx8,286 +numpy/f2py/tests/src/regression/lower_f2py_fortran.f90,sha256=bWlj2Frch3onnUpd6DTaoLDa6htrrbkBiI9JIRbQPfE,105 +numpy/f2py/tests/src/return_character/foo77.f,sha256=tRyQSu9vNWtMRi7gjmMN-IZnS7ogr5YS0n38uax_Eo0,1025 +numpy/f2py/tests/src/return_character/foo90.f90,sha256=WPQZC6CjXLbUYpzy5LItEoHmRDFxW0ABB3emRACsjZU,1296 +numpy/f2py/tests/src/return_complex/foo77.f,sha256=7-iKoamJ-VObPFR-Tslhiw9E-ItIvankWMyxU5HqxII,1018 +numpy/f2py/tests/src/return_complex/foo90.f90,sha256=_GOKOZeooWp3pEaTBrZNmPmkgGodj33pJnJmySnp7aE,1286 +numpy/f2py/tests/src/return_integer/foo77.f,sha256=EKs1KeAOQBkIO99tMCx0H7_lpqvqpjie8zWZ6T_bAR4,1234 +numpy/f2py/tests/src/return_integer/foo90.f90,sha256=0aYWcaAVs7Lw3Qbf8hupfLC8YavRuPZVIwjHecIlMOo,1590 +numpy/f2py/tests/src/return_logical/foo77.f,sha256=Ax3tBVNAlxFtHhV8fziFcsTnoa8YJdapecMr6Qj7fLk,1244 +numpy/f2py/tests/src/return_logical/foo90.f90,sha256=IZXCerFecYT24zTQ_spIoPr6n-fRncaM0tkTs8JqO1E,1590 +numpy/f2py/tests/src/return_real/foo77.f,sha256=3nAY1YtzGk4osR2jZkHMVIUHxFoOtF1OLfWswpcV7kA,978 +numpy/f2py/tests/src/return_real/foo90.f90,sha256=38ZCnBGWb9arlJdnVWvZjVk8uesrQN8wG2GrXGcSIJs,1242 +numpy/f2py/tests/src/routines/funcfortranname.f,sha256=ruyXK6eQSLQnQ_rODT1qm1cJvpHrFhI6NRrnWvEIK0U,128 +numpy/f2py/tests/src/routines/funcfortranname.pyf,sha256=EgRw8ZWGdd2uK4qCZD89r9VQtEXmnKDx59OpB0K58as,451 +numpy/f2py/tests/src/routines/subrout.f,sha256=35DjHIj85ZLkxRxP4bs-WFTQ5y1AyDqBKAXTzSSTAxE,94 +numpy/f2py/tests/src/routines/subrout.pyf,sha256=xT_WnDpvpyPb0FMRAVTRRgm3nlfALf1Ojg8x3qZNv_4,332 +numpy/f2py/tests/src/size/foo.f90,sha256=nK_767f1TtqVr-dMalNkXmcKbSbLCiabhRkxSDCzLz0,859 +numpy/f2py/tests/src/string/char.f90,sha256=X_soOEV8cKsVZefi3iLT7ilHljjvJJ_i9VEHWOt0T9Y,647 
+numpy/f2py/tests/src/string/fixed_string.f90,sha256=tCN5sA6e7M1ViZtBNvTnO7_efk7BHIjyhFKBoLC3US0,729 +numpy/f2py/tests/src/string/gh24008.f,sha256=Z6cq8SFGvmaA72qeH9tu1rP8pYjqm0ONpHn7nGbhoLA,225 +numpy/f2py/tests/src/string/gh24662.f90,sha256=xJkiYvrMT9Ipb9Cq7OXl1Ev6TISl8pq1MGemySzfGd0,204 +numpy/f2py/tests/src/string/gh25286.f90,sha256=lqEl81Iu9GIDTAbOfkkNGcGgDyyGnPB44mJw2iK1kng,318 +numpy/f2py/tests/src/string/gh25286.pyf,sha256=wYkkr5gEN9_RtGjpqh28X1k8KCgh0-Ds9XAt8IC9j4A,393 +numpy/f2py/tests/src/string/gh25286_bc.pyf,sha256=ZRvgSzRlaPEx8GyNt97FrRhtCg-r4ZTEDsHNBfit4m8,396 +numpy/f2py/tests/src/string/scalar_string.f90,sha256=U1QqVgbF1DbxdFekRjchyDlFRPnXwzG72kuE8A44Za8,185 +numpy/f2py/tests/src/string/string.f,sha256=JCwLuH21Ltag5cw_9geIQQJ4Hv_39NqG8Dzbqj1eDKE,260 +numpy/f2py/tests/src/value_attrspec/gh21665.f90,sha256=MbbSUQI5Enzq46KWFHRzQbY7q6ZHJH_9NRL-C9i13Wg,199 +numpy/f2py/tests/test_abstract_interface.py,sha256=673rVYr6ZsMSb3lumjiqeyK2DjkMLEFrqmpRljYWRes,833 +numpy/f2py/tests/test_array_from_pyobj.py,sha256=8W5j1nqbXbUpCsCw3S5mpGfuStvkFCKVdfXi7eU2co4,24379 +numpy/f2py/tests/test_assumed_shape.py,sha256=IyqJPGpGVv_RaRCwrko_793jLxJC1495tR9gAbmTlR8,1515 +numpy/f2py/tests/test_block_docstring.py,sha256=0Dh1GXlaCg33DmlbhC08MOBMXdpMbk983MQB2hB7XhA,600 +numpy/f2py/tests/test_callback.py,sha256=pIloccFF6nJOMwD4yOiDWHFUtU2PfKkrZfWuhqIhBM0,7375 +numpy/f2py/tests/test_character.py,sha256=IuV6DQ--Tr-NEAWSxzWzrjDQtVAgXLiV-jfHi_dc5Sc,22544 +numpy/f2py/tests/test_common.py,sha256=z1qoOm6HFvLal_cOCPuNn7NVohWjWBcO2v1maVFfRhQ,661 +numpy/f2py/tests/test_crackfortran.py,sha256=0xxfF0AbYddVou72KbRZX2IMnSfUh3Cj5hh5FEH5vjM,16801 +numpy/f2py/tests/test_data.py,sha256=JSObh8NfZipQQp0_021GLVKhmwhiNxEvAf5Zm2q0dds,2958 +numpy/f2py/tests/test_docs.py,sha256=IAauf96ibmpi6hzND8dI_vfAnLoUn-GzHMVf05GIwJM,1909 +numpy/f2py/tests/test_f2cmap.py,sha256=2Yy4zuFrkn0QvCkiRjGiHqirp9bXe8ODSnM_LYNAUsM,400 +numpy/f2py/tests/test_f2py2e.py,sha256=BeUBmCNKiXYe2TxPMChpHMCo7MZnbqYHl6iToJ4q25g,28832 
+numpy/f2py/tests/test_isoc.py,sha256=KGUijaN2Qum_qQD1Rc7m7B3dMTx47oRud8ZWNfc5M0Y,1481 +numpy/f2py/tests/test_kind.py,sha256=iVs-TL343aNa6NOaw31EaYB3scFdnU4n0_IKPdjyAco,1832 +numpy/f2py/tests/test_mixed.py,sha256=95O8xkouDaNFckMa2T4qnUfBpVEVugbM0iruQo9JFpw,893 +numpy/f2py/tests/test_modules.py,sha256=mMLzcjENVJ3on--z9qmbUthruWz02T49XiY_A0xbzkw,2380 +numpy/f2py/tests/test_parameter.py,sha256=KTmgD77wZFHqZyq4wfRGbR9RisNti8IgO-Q6mUneSwo,4753 +numpy/f2py/tests/test_pyf_src.py,sha256=RLm95aANGakQYCzk_UJjUcq0mOQH0LtD6HoZYkEiIrU,1179 +numpy/f2py/tests/test_quoted_character.py,sha256=cLPRMhNiCO0v-_A5jPkTg-Zv38U-bbJteuLOL9VSZik,493 +numpy/f2py/tests/test_regression.py,sha256=A3a3hbpMqUrFEKp3p3IxueubfaoZyJZBJQz7A0BJqe4,6023 +numpy/f2py/tests/test_return_character.py,sha256=9hAUrTWmHkSnRQM4pz43cLFBSEIU5sN8g2M8xaqBqBE,1557 +numpy/f2py/tests/test_return_complex.py,sha256=ynSaaMSxiBTApp-tIGwXHLe5gCjqm4qJCq_QNwihGWk,2481 +numpy/f2py/tests/test_return_integer.py,sha256=PNeeeykh0Q9oPxUCcuLC3Q1XFbRrk7jhQwK6erjau0M,1830 +numpy/f2py/tests/test_return_logical.py,sha256=gPBO6zxmwek0fUIvCDgybiltiNqiMwaIqqsY2o0PXtg,2081 +numpy/f2py/tests/test_return_real.py,sha256=e39QqQEDkpxkVEl_5qK67cu7uv0iZUaRA7tlYeKynV0,3354 +numpy/f2py/tests/test_routines.py,sha256=4Bg3qLRIyKFzdM3BoRW6vn6CKI2EUzlt5wnHDBzBx0c,822 +numpy/f2py/tests/test_semicolon_split.py,sha256=ZkWpV7iKLoSffVdoz_iDdmZnm0Ty4zZSG5git8dsBeY,1700 +numpy/f2py/tests/test_size.py,sha256=GKZ5zCsY-wWq4zwlBfMpLub-9Mziy5GFOC7dg39k7ng,1198 +numpy/f2py/tests/test_string.py,sha256=KEic6DcDoHZuqofWtytUAqaOC-GWR4SVa2jxsdXq1zw,3034 +numpy/f2py/tests/test_symbolic.py,sha256=Zk4h3WC2etMrIEyMrayPpGthpWfuS35Yz-4XzzGFcY4,18835 +numpy/f2py/tests/test_value_attrspec.py,sha256=CbcEA3U2rFrFE-7roKIXQXP02Vq7pgwicrP05XrizK0,343 +numpy/f2py/tests/util.py,sha256=9BhKV5A5gwhrBBdnI_aa0n5PwPXS4yRijd8VjBzflCA,12615 +numpy/f2py/use_rules.py,sha256=zWh8pG5ewfg_LInDmT48O7c0oBlDaGQ4exp5C5ZUZzU,3621 
+numpy/fft/__init__.py,sha256=MwVEjIo3wDxMAbKERmmX3cHh8EK9nIw9vlUNTpOgNyo,8541 +numpy/fft/__init__.pyi,sha256=9LUY_NorLJecQQHN-0dLE9uVLhwv03Bh2iFVTOpSMW8,557 +numpy/fft/__pycache__/__init__.cpython-310.pyc,, +numpy/fft/__pycache__/_helper.cpython-310.pyc,, +numpy/fft/__pycache__/_pocketfft.cpython-310.pyc,, +numpy/fft/__pycache__/helper.cpython-310.pyc,, +numpy/fft/_helper.py,sha256=nAtQQ7eHZrQhws3IEIBtpnCWA4emPricOmNnXrm_bng,7010 +numpy/fft/_helper.pyi,sha256=Fraw7-4rRa4tl_UT1HWzvGrR2bE7rNcru0PpsC1_byU,1379 +numpy/fft/_pocketfft.py,sha256=3M0RsdVo_6SpjG12H7W67Wr5GGXc83ipSAx-4gCV2VY,64379 +numpy/fft/_pocketfft.pyi,sha256=PUfhum-xLMNaYacpoJZj3ho-wsoIWrbAS_pq84V8oEc,3292 +numpy/fft/_pocketfft_umath.cp310-win_amd64.lib,sha256=vHAqjypeqqRF3HscG4FcAJEsNbkUQVcMe2TLvOFhtEs,2176 +numpy/fft/_pocketfft_umath.cp310-win_amd64.pyd,sha256=6ikozoUVz3mjUoxh6AhBc2f0tYY9muFMhTSUUuoOkNY,279040 +numpy/fft/helper.py,sha256=Dvf6DS9pHTCmugMQy5IBwk5LlSt5PjdShv1IRsUySIY,626 +numpy/fft/helper.pyi,sha256=MDJI7k0BFz8N1DuYkyBCEdaT09d3CHEsBaG9JAgs2aI,913 +numpy/fft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/fft/tests/__pycache__/__init__.cpython-310.pyc,, +numpy/fft/tests/__pycache__/test_helper.cpython-310.pyc,, +numpy/fft/tests/__pycache__/test_pocketfft.cpython-310.pyc,, +numpy/fft/tests/test_helper.py,sha256=-CrZvGxoD1xhFNVsHJS3oNTw6yYoNq06CKHmWO_0fSk,6316 +numpy/fft/tests/test_pocketfft.py,sha256=QasTw3GPyU-MiB1qgtcDxBSjCGrBnCt0BTUmMjnrAFU,24999 +numpy/lib/__init__.py,sha256=pcYU9wc4cOsrPI9GocW4nkAHr28r3OkEFWx6b6tXsdY,3320 +numpy/lib/__init__.pyi,sha256=ytClnxgcmYBSM80EuL8ooDJr7uMZttvWzi2JexClhPQ,538 +numpy/lib/__pycache__/__init__.cpython-310.pyc,, +numpy/lib/__pycache__/_array_utils_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_arraypad_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_arraysetops_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_arrayterator_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_datasource.cpython-310.pyc,, 
+numpy/lib/__pycache__/_function_base_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_histograms_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_index_tricks_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_iotools.cpython-310.pyc,, +numpy/lib/__pycache__/_nanfunctions_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_npyio_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_polynomial_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_scimath_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_shape_base_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_stride_tricks_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_twodim_base_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_type_check_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_ufunclike_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_user_array_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_utils_impl.cpython-310.pyc,, +numpy/lib/__pycache__/_version.cpython-310.pyc,, +numpy/lib/__pycache__/array_utils.cpython-310.pyc,, +numpy/lib/__pycache__/format.cpython-310.pyc,, +numpy/lib/__pycache__/introspect.cpython-310.pyc,, +numpy/lib/__pycache__/mixins.cpython-310.pyc,, +numpy/lib/__pycache__/npyio.cpython-310.pyc,, +numpy/lib/__pycache__/recfunctions.cpython-310.pyc,, +numpy/lib/__pycache__/scimath.cpython-310.pyc,, +numpy/lib/__pycache__/stride_tricks.cpython-310.pyc,, +numpy/lib/__pycache__/user_array.cpython-310.pyc,, +numpy/lib/_array_utils_impl.py,sha256=8V5hh2JYzL0LKy2KBrRPh-FZHjfKrn7nyS_VNrvRSO0,1751 +numpy/lib/_array_utils_impl.pyi,sha256=PvJJhNRRyOcLtY-FGwkAmRCdJfHEWcl4BwnU11Waq_s,818 +numpy/lib/_arraypad_impl.py,sha256=Ri6I_s95XLEUwSykx9eOBz7JqiPNKQTMH24uQrDBD-k,33217 +numpy/lib/_arraypad_impl.pyi,sha256=bOjBqunPqcmMN4RiOMe0T5d03gIMxIm_fkGMn8ONiSM,1881 +numpy/lib/_arraysetops_impl.py,sha256=4_42VA7Lu4P-ODK9M3kusK-xKQP5u7tcThyuHMcn0Vo,40524 +numpy/lib/_arraysetops_impl.pyi,sha256=B8kt91kSzkrtA-fwqwWWLNpPDzEmcM0-BWKiYd9Ne0I,9948 +numpy/lib/_arrayterator_impl.py,sha256=n1_emvfqXcvOtLhaeMtCvYiZqBCnVCNfwpI95loCBa0,7410 
+numpy/lib/_arrayterator_impl.pyi,sha256=U-olocb1ETuXURoG6QuYhq1xoeDxrtSQInphHsHCGiU,1873 +numpy/lib/_datasource.py,sha256=H7HKFHCye9r2mLDup6KYnuKcjUrOd3Gc2wFMn78rVGY,23429 +numpy/lib/_datasource.pyi,sha256=a_mEw94cyK-Ik7ZaQIDIJp8CB2pYV-1FEvRkZHCM20c,1027 +numpy/lib/_function_base_impl.py,sha256=3T1Z1wJatBn8fV7okN9moLoiL5jSiNMGRvex0jRfhvg,201865 +numpy/lib/_function_base_impl.pyi,sha256=1FuQSPYALot4Z8LXSVvcjiP8ZlZh0fvFBSPHcGgK4iY,23122 +numpy/lib/_histograms_impl.py,sha256=WCi3-3v3J6NeXxoQDy2MBxJCQSufgMkmtvr4IIgK-I4,39852 +numpy/lib/_histograms_impl.pyi,sha256=HFQ2VaV5St1hH7CrdnUOn2pKurKjXoNRN4TJ9D1vmYI,1118 +numpy/lib/_index_tricks_impl.py,sha256=34-lqDXxLap3V6I_C27OeyYJLi1fKIRyz-WJ3K2g7D8,33248 +numpy/lib/_index_tricks_impl.pyi,sha256=shCCeeX_Xd_fA18YRmhhtk4WU76BP1qFYOQPB2mzeTg,6521 +numpy/lib/_iotools.py,sha256=gfw4LwMszW5bDH78mM1Y_VWWCZ_u24uW-tuoZAVBM2k,31840 +numpy/lib/_iotools.pyi,sha256=_234y4IuHkYOY1Mt-7lJ-kJ3FJbhjoMRl0zZhyVS-dk,3493 +numpy/lib/_nanfunctions_impl.py,sha256=32gsaYfFPKAqeqxkjl9XAFYaKun6F9f9-Auhk0t73Jk,74178 +numpy/lib/_nanfunctions_impl.pyi,sha256=N8tDZ0fhyMiFD0-4M_rBP61DmDkIGiN171GNjwVxlck,886 +numpy/lib/_npyio_impl.py,sha256=53vlumSVsWMjaSaFNtM7Adb8xzBlEI1KiZB-Yxiuvx0,101972 +numpy/lib/_npyio_impl.pyi,sha256=YH-oiyiwd5li4imo6_kjvpxGgnO-_wQuD4NFsz0ClW0,9555 +numpy/lib/_polynomial_impl.py,sha256=o5Qd-VSe36gsgEuJBrjJrW4j5ZMMfYuq6howHR3TazE,45752 +numpy/lib/_polynomial_impl.pyi,sha256=MmlWawTNlEnyvlMokm_w3IyWpUIDQvPjixH3CGVurMo,7432 +numpy/lib/_scimath_impl.py,sha256=3P2GH0hldWMw6hgqNmv_KXlENfFYzeIy9gqrqcOugdg,16334 +numpy/lib/_scimath_impl.pyi,sha256=9aJJX_D19ccIrGqy0VCmRGAGVoluWCYQLW7_ecyk_XE,3049 +numpy/lib/_shape_base_impl.py,sha256=LgKYMnNErkMy8xbIumjaPJHFPOALvFTRwGQHYjfCtZo,40647 +numpy/lib/_shape_base_impl.pyi,sha256=fl0aTfotN-9IOeGsEAVLnexbM-njPFKnYU13cBujHkE,5513 +numpy/lib/_stride_tricks_impl.py,sha256=0Lrnvmponu10hS2g6E0Ec7sHuNrfNS5CoPZPqWPP74M,18574 
+numpy/lib/_stride_tricks_impl.pyi,sha256=FeWPs1yD4uQQSis8w4cm9-YW7IQ6bv333JFaqIc0zrQ,1881 +numpy/lib/_twodim_base_impl.py,sha256=_pfbE4LTkMSssA5Piz1F2c9pOMtJMO7LNw_My6PF0kA,35052 +numpy/lib/_twodim_base_impl.pyi,sha256=JgqPjlBo1JFeYdMnb2NkMYyR8Dvami3hjk4VQ-MK6mY,11706 +numpy/lib/_type_check_impl.py,sha256=NZhF_zIbdmDzbLbwakOnPhl2eRz3lJW_rNzBCULLSEk,19919 +numpy/lib/_type_check_impl.pyi,sha256=si8-6dnzrcaeVtdqnoL6Cu82ZdoB9inkjmZxiRdaFz4,5366 +numpy/lib/_ufunclike_impl.py,sha256=mq924a_rI7wvsWoPKHyc38WLI11fxCAiog-k6gJ5br0,6516 +numpy/lib/_ufunclike_impl.pyi,sha256=4Q_uMOYYI58InUsBcBnq3l-JBROsvEUytTB7xYBT8ls,1389 +numpy/lib/_user_array_impl.py,sha256=mx1xZjZib3SxnopvslFEK6Z-ql_ZzgUsn1u0LZ8KnXw,8262 +numpy/lib/_user_array_impl.pyi,sha256=no_xh1L4-mCrXwMRcYGVEqRsznuuYq8kwgkRcaHuWkc,9521 +numpy/lib/_utils_impl.py,sha256=9jwNKayFoYxCrg4GgdFXBCZwfjMNczoaQBfs0msKxVs,24163 +numpy/lib/_utils_impl.pyi,sha256=Avu_JgLOX4FJnFI8KcqUWdx_V2ldS6_YHNMg9yvfygY,284 +numpy/lib/_version.py,sha256=m4Z1ufCoQH5yYndKrkXKiN3p8FIygUbeYt2fjGfi2Rs,5009 +numpy/lib/_version.pyi,sha256=zAmfNnFeke7_lHsvR94fafNBcuJHpZ1jaB2PyzEostc,658 +numpy/lib/array_utils.py,sha256=SyMHXlsOJMKwxkjQxjsxx3J2cgx_3J2N0qqmLZTQgMc,137 +numpy/lib/array_utils.pyi,sha256=YYnx_V4CMdSbJTCnYboN1swcswmlOD2e4ZvQj5WsSak,197 +numpy/lib/format.py,sha256=yKvqaH4nwrS7GPoQUV_YgnNC_s0KQaZ-08WG10q1x3I,37208 +numpy/lib/format.pyi,sha256=qF5MgX4HL45SWz12KobX03cr40MoDiXDs4vFltAZVuE,770 +numpy/lib/introspect.py,sha256=P7-Um4--wGHOWLVusNN1bhjMuA1g6kKmu-jx1GGeOPM,2810 +numpy/lib/introspect.pyi,sha256=IsntuFrlFhRBZcGGhRUTAgnONUHEbYw_2ApPmffx8QE,155 +numpy/lib/mixins.py,sha256=hSDMCuYP518waugn3Vdu_S4tbXtDeUkc-zB3wwzyoOI,7519 +numpy/lib/mixins.pyi,sha256=pBHGtj8_EFCwyv6uPlKEMrBTOysEezePNdjqUYMsgPM,3205 +numpy/lib/npyio.py,sha256=nZadg1IKRXTLZX_52TpjU-YutNH5QA_UU457rHfn6oc,65 +numpy/lib/npyio.pyi,sha256=6xZ6zF-6qKuSOfjjDL4YN43xKPYcD6IpzJiDiLpmSSs,121 +numpy/lib/recfunctions.py,sha256=SgWulquccYhucyVpz1aU6Qi8pbcMKyEppbsrpIsh-nM,61339 
+numpy/lib/recfunctions.pyi,sha256=Hjbbqt7Jl-bmrWEBvR1ZZHL6EForacXyk73oRnlBXus,13718 +numpy/lib/scimath.py,sha256=HgFt3iWrgcxgV4Y6U-xyZZBM_MMewX62uP8HhOxhveY,122 +numpy/lib/scimath.pyi,sha256=PhlpjveDqnSQvLn2cQ1AQFNVpxECaBWgYvhK8S32jzo,245 +numpy/lib/stride_tricks.py,sha256=BDqFklWQ4eVAoAvtdb_3nT0YxXeMZOtPp6nBr7gKG64,85 +numpy/lib/stride_tricks.pyi,sha256=6-K3R7XBw_fcpHaAIs9y4LEc5i4r5gZUG-tg4EOR-ew,128 +numpy/lib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/lib/tests/__pycache__/__init__.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test__datasource.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test__iotools.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test__version.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_array_utils.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_arraypad.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_arraysetops.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_arrayterator.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_format.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_function_base.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_histograms.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_index_tricks.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_io.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_loadtxt.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_mixins.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_nanfunctions.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_packbits.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_polynomial.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_recfunctions.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_regression.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_shape_base.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_stride_tricks.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_twodim_base.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_type_check.cpython-310.pyc,, 
+numpy/lib/tests/__pycache__/test_ufunclike.cpython-310.pyc,, +numpy/lib/tests/__pycache__/test_utils.cpython-310.pyc,, +numpy/lib/tests/data/py2-np0-objarr.npy,sha256=ZLoI7K3iQpXDkuoDF1Ymyc6Jbw4JngbQKC9grauVRsk,258 +numpy/lib/tests/data/py2-objarr.npy,sha256=F4cyUC-_TB9QSFLAo2c7c44rC6NUYIgrfGx9PqWPSKk,258 +numpy/lib/tests/data/py2-objarr.npz,sha256=xo13HBT0FbFZ2qvZz0LWGDb3SuQASSaXh7rKfVcJjx4,366 +numpy/lib/tests/data/py3-objarr.npy,sha256=7mtikKlHXp4unZhM8eBot8Cknlx1BofJdd73Np2PW8o,325 +numpy/lib/tests/data/py3-objarr.npz,sha256=vVRl9_NZ7_q-hjduUr8YWnzRy8ESNlmvMPlaSSC69fk,453 +numpy/lib/tests/data/python3.npy,sha256=X0ad3hAaLGXig9LtSHAo-BgOvLlFfPYMnZuVIxRmj-0,96 +numpy/lib/tests/data/win64python2.npy,sha256=agOcgHVYFJrV-nrRJDbGnUnF4ZTPYXuSeF-Mtg7GMpc,96 +numpy/lib/tests/test__datasource.py,sha256=H6PZKQ0tY6r1bhrcLRKMjWdWop5P4Rj_SYvrU9ukDzc,10921 +numpy/lib/tests/test__iotools.py,sha256=ejbG7SVvTm55Lq5LdUza8-nIvF2mt-XYvfpzn13q038,14097 +numpy/lib/tests/test__version.py,sha256=v2TOlH4f1Pmzxn1HWby3eBgLO9tGnhwH2LvBXlXtHP4,2063 +numpy/lib/tests/test_array_utils.py,sha256=Fy8_PR6GHed-mStqcbfjTe8Q5zMZnJ9WzFzX6DjoRR0,1152 +numpy/lib/tests/test_arraypad.py,sha256=Nc4xoxjlZkuaFSWgc2uP9bIXiLaYcje1tFF2fbIMlAE,57480 +numpy/lib/tests/test_arraysetops.py,sha256=yQy2uGGx_oYJu8nDEYujP_NIlDBmxCiyH5a7t5UH8cA,39023 +numpy/lib/tests/test_arrayterator.py,sha256=IRVmzxbr9idboJjOHKuX_8NQhMAKs7pD1xWqmU3ZERw,1337 +numpy/lib/tests/test_format.py,sha256=6Kt8l-P9lYsCbY2KRQBusCrNeoGYlFTMcFagXs3gVeY,41937 +numpy/lib/tests/test_function_base.py,sha256=7jlXXT_0-ChV3Sak8bjxHGyfoXcramgCi6MTDIryaaw,173318 +numpy/lib/tests/test_histograms.py,sha256=4PnaePQSpV_HsKynnbe5Hc5L02Z66ecCL24cvAYoeRg,34535 +numpy/lib/tests/test_index_tricks.py,sha256=tgXpLGpT9XpO_djXCTKpM0-WF-AVE5GF8lbvIyUz9X4,20921 +numpy/lib/tests/test_io.py,sha256=cUYVHDew1N-OfetUt-8e19VqMMWAUKZNhN6udbxfcZw,112868 +numpy/lib/tests/test_loadtxt.py,sha256=pvRZMon6Vyy_pdbEiJdJi17RW6I-Rg83Uc3XwMCvew0,41622 
+numpy/lib/tests/test_mixins.py,sha256=nIec_DZIDx7ONnlpq_Y2TLkIULAPvQ7LPqtMwEHuV4U,7246 +numpy/lib/tests/test_nanfunctions.py,sha256=oeuoa1r3zx5JJFkU_zdne8GMSWET3UPDxL1sdVZPfAM,54762 +numpy/lib/tests/test_packbits.py,sha256=yN8rYbPDteOPJf7dEeZkgSnyzIUKe_ituLYdsqxcqbQ,17920 +numpy/lib/tests/test_polynomial.py,sha256=qYZGXUIeyZoNjbkEYeuUq1ad2eCPDkNXj6MxakvbIvk,11731 +numpy/lib/tests/test_recfunctions.py,sha256=Yg2pQEcOgf4d3PgrGiwxhrrXfyPXRa3SsyItxkY_wwA,45029 +numpy/lib/tests/test_regression.py,sha256=aCW5aT1PJL1ZCwrHUSa7iixQLWMC3D5iFSRDsWE2Uag,7921 +numpy/lib/tests/test_shape_base.py,sha256=fYKyGdLTM-l2rlTHAzDJbObc_SQWXXF8QoKt266F7K4,28296 +numpy/lib/tests/test_stride_tricks.py,sha256=EKHYiPoawG_vu_tFmKi5Lmvfs0VEDcUW7feiWybUGXA,23644 +numpy/lib/tests/test_twodim_base.py,sha256=mNNXsDKT3hPpz-HB_1k8YTWpwdx7dnvmrWWS_Lkew30,19382 +numpy/lib/tests/test_type_check.py,sha256=2lnLRzUA0voTKURi-qXllYYxBAqpsVAJmMtLQCHoIYA,15145 +numpy/lib/tests/test_ufunclike.py,sha256=9C9LV3XZLaHNQoyRVZl-C4w9HcOTEJMDw2uXYXhf1u4,3123 +numpy/lib/tests/test_utils.py,sha256=KN1q-eFLmckYbOMTxPKTwFMPtzBHdAPb0j9ntfea_yM,2454 +numpy/lib/user_array.py,sha256=v3dCCNs-PZ7tHZ1vqGqdeV5FLHRiLLWrMZhdzQTSRAM,50 +numpy/lib/user_array.pyi,sha256=IaCNerLboKjt3Fm-_k_d8IqeyJf7Lc9Pr5ROUr6wleM,54 +numpy/linalg/__init__.py,sha256=AZnH2FnMk_bDy8VuOsihmoS-nICrpKIRMPNa5Puyk30,2201 +numpy/linalg/__init__.pyi,sha256=Czr1hGuEjSGY_J7NbFaprCisxeIANCZAYqKz0YRUQAI,1076 +numpy/linalg/__pycache__/__init__.cpython-310.pyc,, +numpy/linalg/__pycache__/_linalg.cpython-310.pyc,, +numpy/linalg/__pycache__/linalg.cpython-310.pyc,, +numpy/linalg/_linalg.py,sha256=yqAqD7BFR_C8y1I4BJ9KAiYkmhUJ1683g5hQoxvYjl4,118309 +numpy/linalg/_linalg.pyi,sha256=namoF69OjkhE0tz2ngdjcdU0NwPG4uhoDLjzb5_iAnc,11867 +numpy/linalg/_umath_linalg.cp310-win_amd64.lib,sha256=hQFlOtBcmYLBBWP2DFzIVsZdfW54sF9M8sNyAnT3PF4,2120 +numpy/linalg/_umath_linalg.cp310-win_amd64.pyd,sha256=FscrWwGPhO-90sHURp-utkPv1RP6ftLA3UvUy_bGbzc,108032 
+numpy/linalg/_umath_linalg.pyi,sha256=g5NJoNte6CwuMFDfd55O8OvJv4lOi539VKAB-Mrc864,1470 +numpy/linalg/lapack_lite.cp310-win_amd64.lib,sha256=hvoi4odUIg3jDapKqhCPZB3kK1w7xdNCC1neYhcnsYI,2084 +numpy/linalg/lapack_lite.cp310-win_amd64.pyd,sha256=9FeW7d3ro-9ffbROz6qPH5qp8CVs-Sb-GOyLITL1FLk,17920 +numpy/linalg/lapack_lite.pyi,sha256=sWKWBDR2UP0ez6ETdE0Rz-mp8m_gOCMo4CYVZajDMNo,2818 +numpy/linalg/linalg.py,sha256=1CC9jc-u61GePC5AuieDiyMyrVvgLD8ZJbTPvLfKjHc,600 +numpy/linalg/linalg.pyi,sha256=iGd8b4-gN1d92K7wfgDZxoHrVXnVC1c6vGqW4ZbWldY,1001 +numpy/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/linalg/tests/__pycache__/__init__.cpython-310.pyc,, +numpy/linalg/tests/__pycache__/test_deprecations.cpython-310.pyc,, +numpy/linalg/tests/__pycache__/test_linalg.cpython-310.pyc,, +numpy/linalg/tests/__pycache__/test_regression.cpython-310.pyc,, +numpy/linalg/tests/test_deprecations.py,sha256=GaeE3JnQlJLoAfbY93LmgCFUlV5M8IFmQ7EhF4WbqwU,660 +numpy/linalg/tests/test_linalg.py,sha256=D7AhxqGiHwPjIHI3JNkPcsYFOstTiKe_PTNl6d4YlSE,85701 +numpy/linalg/tests/test_regression.py,sha256=KRXOhAHjZbE3h6vqksTvQayrvhUkyRM8_O6Ky5s-Nqs,6866 +numpy/ma/API_CHANGES.txt,sha256=U39zA87aM_OIJhEKvHgL1RY1lhMJZc1Yj3DGLwbPbF0,3540 +numpy/ma/LICENSE,sha256=1427IIuA2StNMz5BpLquUNEkRPRuUxmfp3Jqkd5uLac,1616 +numpy/ma/README.rst,sha256=_MHrqHTE8L4wiJJqvaOh1l-xTxidwdilc_SZkFbgubM,10110 +numpy/ma/__init__.py,sha256=EFe3qk5iN_7Z__BwlkEW6xo2Zc6NnI8F7G2b1UVW4uY,1473 +numpy/ma/__init__.pyi,sha256=76dORzdLey4HoMD26xJFuw-2aIGrB30xt8rFqE8xafY,7404 +numpy/ma/__pycache__/__init__.cpython-310.pyc,, +numpy/ma/__pycache__/core.cpython-310.pyc,, +numpy/ma/__pycache__/extras.cpython-310.pyc,, +numpy/ma/__pycache__/mrecords.cpython-310.pyc,, +numpy/ma/__pycache__/testutils.cpython-310.pyc,, +numpy/ma/__pycache__/timer_comparison.cpython-310.pyc,, +numpy/ma/core.py,sha256=4Jv1_64eM6_aBIQ3sp4uKUU8sfxslMzeGB1AoyTTico,299477 
+numpy/ma/core.pyi,sha256=2akub4Nv5xHCrhtBQ28pVG2BcnU8hi65gK-UkZP0_6o,18835 +numpy/ma/extras.py,sha256=0Od0rMKh6FLyG0byaU5kAeWcZCRfcVQRTNutMfmiCRo,72951 +numpy/ma/extras.pyi,sha256=YYuESxQTbtdLwxk_rZz7oZbg_JJMzBo92CEpFIpCWnA,3938 +numpy/ma/mrecords.py,sha256=BXglbMRYLeB5FxBcU_1vzmMZpF6iXaKS57qpTkWFm8A,27888 +numpy/ma/mrecords.pyi,sha256=oGSsEingxJ_A07fLNDrckjS7MwA8yZN_N6wkOEmRPeE,2078 +numpy/ma/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/ma/tests/__pycache__/__init__.cpython-310.pyc,, +numpy/ma/tests/__pycache__/test_arrayobject.cpython-310.pyc,, +numpy/ma/tests/__pycache__/test_core.cpython-310.pyc,, +numpy/ma/tests/__pycache__/test_deprecations.cpython-310.pyc,, +numpy/ma/tests/__pycache__/test_extras.cpython-310.pyc,, +numpy/ma/tests/__pycache__/test_mrecords.cpython-310.pyc,, +numpy/ma/tests/__pycache__/test_old_ma.cpython-310.pyc,, +numpy/ma/tests/__pycache__/test_regression.cpython-310.pyc,, +numpy/ma/tests/__pycache__/test_subclassing.cpython-310.pyc,, +numpy/ma/tests/test_arrayobject.py,sha256=ap06C0a0dGWcOknpctbhLbzHSNd2M9p_JL2jESqBBGk,1139 +numpy/ma/tests/test_core.py,sha256=UfmlFEHJCksx4ad9UsP77n9bOenWqqe4peM7G5JG18k,225055 +numpy/ma/tests/test_deprecations.py,sha256=WurKSuN6hsXmWxRoxstdVBXcKCTvYxlYz-ntSkW6qKc,2650 +numpy/ma/tests/test_extras.py,sha256=C_auxUGRJ38o-7LZGNTN5IdAi48c1QIY8bzM2NozB6g,80274 +numpy/ma/tests/test_mrecords.py,sha256=TzQwlvY1iJnKH7ARsOI9nNaNeTt1sGgZAj8NEjP7jY0,20348 +numpy/ma/tests/test_old_ma.py,sha256=tQ-IqKZ1NMHq5_8qkOaZWg_rZkWBpRaPnlodBRd_ABA,33629 +numpy/ma/tests/test_regression.py,sha256=J1ftHDKfIF3SUIgQlxJplCsYTrPpAyN4rf5K1Uw5T8w,3384 +numpy/ma/tests/test_subclassing.py,sha256=UFK0R44pRCmcENP2kbI_4hRMQ7YC6qjplZNM0WeqcCM,17469 +numpy/ma/testutils.py,sha256=86e8bckl-C24JBICXzVMI_s4RqtbgZqDLD0L5tZPTgc,10564 +numpy/ma/timer_comparison.py,sha256=a3kW2PlSCDXmVrDx0VGPQ9vhcQIuDUPEnKZ54zVP810,16153 +numpy/matlib.py,sha256=DJsayODBbd0n6MmhxPmgiL28ALyLgQdHtQ5BHKggY5I,11036 
+numpy/matlib.pyi,sha256=Is_0Dii3OSM58bzPXiiJV46xGUK9Nb34adHOqNlMbME,10214 +numpy/matrixlib/__init__.py,sha256=9-DMlmdLxOk5HSGJ20AuTjKkGZ3MUPHCFjhE6sb4NMo,253 +numpy/matrixlib/__init__.pyi,sha256=ZAutkmA8BpttneOyZNqAjiYJN5F7sl-WSAfIkkI2qlI,109 +numpy/matrixlib/__pycache__/__init__.cpython-310.pyc,, +numpy/matrixlib/__pycache__/defmatrix.cpython-310.pyc,, +numpy/matrixlib/defmatrix.py,sha256=jMYex3MhNKlvjgcP9EAYMt_yrVQ0O67ZuXwMRvFCff0,31918 +numpy/matrixlib/defmatrix.pyi,sha256=a4tKYShYR9EeU9Ftub3_iyGzwunUIqK_6uz_B0ZAyXQ,495 +numpy/matrixlib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/matrixlib/tests/__pycache__/__init__.cpython-310.pyc,, +numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-310.pyc,, +numpy/matrixlib/tests/__pycache__/test_interaction.cpython-310.pyc,, +numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-310.pyc,, +numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-310.pyc,, +numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-310.pyc,, +numpy/matrixlib/tests/__pycache__/test_numeric.cpython-310.pyc,, +numpy/matrixlib/tests/__pycache__/test_regression.cpython-310.pyc,, +numpy/matrixlib/tests/test_defmatrix.py,sha256=3cSTjFilFZVq2fMgfoUlx6hf9N4MSvBMhHcemoiUzLA,15488 +numpy/matrixlib/tests/test_interaction.py,sha256=9loMwSKXBOu09Z6aZ6_RG7ojbEfn19A8N39h12F5668,12249 +numpy/matrixlib/tests/test_masked_matrix.py,sha256=SjuUs4IhE3x2y8oM9uoWhKX4K1sX2JNkLQMlhMlvzD0,9146 +numpy/matrixlib/tests/test_matrix_linalg.py,sha256=9S9Zrk8PMLfEEo9wBx5LyrV_TbXhI6r-Hc5t594lQFY,2152 +numpy/matrixlib/tests/test_multiarray.py,sha256=E5jvWX9ypWYNHH7iqAW3xz3tMrEV-oNgjN3_oPzZzws,570 +numpy/matrixlib/tests/test_numeric.py,sha256=l-LFBKPoP3_O1iea23MmaACBLx_tSSdPcUBBRTiTbzk,458 +numpy/matrixlib/tests/test_regression.py,sha256=wpWVjM4pHRaiVX_Y5_zc6yNr4I5zWdmJfHTwbmBUhew,963 +numpy/polynomial/__init__.py,sha256=JAnPIGbR7QJilyIhHjVvA7SsWGSO1Sm0PCse-XWk3dY,6947 
+numpy/polynomial/__init__.pyi,sha256=885H4pfwJHj0xFuPDsV6p_ON2nJenGjA5h8d4uMY-IY,711 +numpy/polynomial/__pycache__/__init__.cpython-310.pyc,, +numpy/polynomial/__pycache__/_polybase.cpython-310.pyc,, +numpy/polynomial/__pycache__/chebyshev.cpython-310.pyc,, +numpy/polynomial/__pycache__/hermite.cpython-310.pyc,, +numpy/polynomial/__pycache__/hermite_e.cpython-310.pyc,, +numpy/polynomial/__pycache__/laguerre.cpython-310.pyc,, +numpy/polynomial/__pycache__/legendre.cpython-310.pyc,, +numpy/polynomial/__pycache__/polynomial.cpython-310.pyc,, +numpy/polynomial/__pycache__/polyutils.cpython-310.pyc,, +numpy/polynomial/_polybase.py,sha256=SsFFCPQxtXzxDgXMsD2ovvoBL-1jQIrmdWCUMBizyPs,40648 +numpy/polynomial/_polybase.pyi,sha256=Kt1x4PgzInVS9mMR_C5d6yjJaIPcfMyhp0tp0Bz2FZk,8821 +numpy/polynomial/_polytypes.pyi,sha256=-NjNhcMP9dwCdWrIod0uRJmSNtqIQSQ6lSbvSy3aKd4,23455 +numpy/polynomial/chebyshev.py,sha256=f0h4dyuTy1KePOjKo7tBeYmLvrh1YcFBzi3i5wZyg1w,64168 +numpy/polynomial/chebyshev.pyi,sha256=AnJkNZoHyIUQvFbQfexdey-GJwN3fMjZs2pDZT6YzvQ,4917 +numpy/polynomial/hermite.py,sha256=XlsIKUW1sAGtdUqUpzZOt9BPyyDebQl_fK7zrlZI8GI,56206 +numpy/polynomial/hermite.pyi,sha256=xggYYL_74IGVlqmK9NXXIiSpGKELIcoqaOOJ0enXvPU,2551 +numpy/polynomial/hermite_e.py,sha256=QBvJfj8aOxTq4qFpY2Fjo0EZs5AEhd_ur4pIh5dq3XA,53850 +numpy/polynomial/hermite_e.pyi,sha256=CGq8MpTXOonV1JzfLdWuN_-pXOYEJG4qvNd977s11ho,2643 +numpy/polynomial/laguerre.py,sha256=ITXPSdc15HORhN5stSri5hGZyuCvv6ZxD2lYLMosSqQ,54054 +numpy/polynomial/laguerre.pyi,sha256=ftBF2ZU4CFriNY6xy8lGP-gNxRB4udAI4HVW7nkv2R0,2274 +numpy/polynomial/legendre.py,sha256=8WMBxMF_AQtfa4d46JYnQCYnbMFBTsixpVm-iBe5iDk,52599 +numpy/polynomial/legendre.pyi,sha256=590XJNm9Yl_ShYBZdcrlB65qs9DEh7OOAmeC_IXu5to,2272 +numpy/polynomial/polynomial.py,sha256=N1O1iPZeg15LQTg7W8Qcz4-J7EwDzHlhRKLFXsN10Aw,53819 +numpy/polynomial/polynomial.pyi,sha256=0KSIDRCJg1EnrZCuyQVCEKP07IiHTFHyaKPC4Po3jJI,2089 
+numpy/polynomial/polyutils.py,sha256=wfNdfDePXKCqJIk8VSWjmApQN1TKpCe-YuBurYwJbi8,23287 +numpy/polynomial/polyutils.pyi,sha256=zA5UdU71NWqiKv3nAYAt5MAcJgAywHOj9lwjX8sbEro,10857 +numpy/polynomial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/polynomial/tests/__pycache__/__init__.cpython-310.pyc,, +numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-310.pyc,, +numpy/polynomial/tests/__pycache__/test_classes.cpython-310.pyc,, +numpy/polynomial/tests/__pycache__/test_hermite.cpython-310.pyc,, +numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-310.pyc,, +numpy/polynomial/tests/__pycache__/test_laguerre.cpython-310.pyc,, +numpy/polynomial/tests/__pycache__/test_legendre.cpython-310.pyc,, +numpy/polynomial/tests/__pycache__/test_polynomial.cpython-310.pyc,, +numpy/polynomial/tests/__pycache__/test_polyutils.cpython-310.pyc,, +numpy/polynomial/tests/__pycache__/test_printing.cpython-310.pyc,, +numpy/polynomial/tests/__pycache__/test_symbol.cpython-310.pyc,, +numpy/polynomial/tests/test_chebyshev.py,sha256=PI2XwvGGqQKEB1RxbsYRgeTG0cunB_8Otd9SBJozq-8,21141 +numpy/polynomial/tests/test_classes.py,sha256=VCcG2ICOteBolQHyfzYzMUhyqHlbAJxV8LdQm9NO50U,19057 +numpy/polynomial/tests/test_hermite.py,sha256=zHGmy1UAuKtLj5Key6BMne7ZRh3tZpowfleghQzyhqo,19131 +numpy/polynomial/tests/test_hermite_e.py,sha256=5ZBtGi2gkeldYVSh8xlQOLUDW6fcT4YdZiTrB6AaGJU,19467 +numpy/polynomial/tests/test_laguerre.py,sha256=Bm5SAKjOcQ6RlSsc8SRXYfU34mbdQ2fdMjf2E9ppznM,18047 +numpy/polynomial/tests/test_legendre.py,sha256=Vbye67yIzN7Ij2UwYZlhSt68hoNeukFHYd1QCvA70ZY,19240 +numpy/polynomial/tests/test_polynomial.py,sha256=zuJJoVLls3H2wnYeLjc514oBCx8hE5AvnbBgtQqJIzI,22660 +numpy/polynomial/tests/test_polyutils.py,sha256=b3vdtJVjC34AmEv96sw2IvIABNDqmYhCnMYZCvhtWzU,3897 +numpy/polynomial/tests/test_printing.py,sha256=_RIcZxPEUJUb8aSpdAkvnZBwBDfIyR8tKI2--w9Y64o,21854 +numpy/polynomial/tests/test_symbol.py,sha256=GZnqB4PLjZDWalREVOAI3qus9kjUDhCW-WZ_87jRmPY,5588 
+numpy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/LICENSE.md,sha256=tLwvT6HJV3jx7T3Y8UcGvs45lHW5ePnzS1081yUhtIo,3582 +numpy/random/__init__.pxd,sha256=g3EaMi3yfmnqT-KEWj0cp6SWIxVN9ChFjEYXGOfOifE,445 +numpy/random/__init__.py,sha256=W_hFzGsKVQfdh3-U15gzsOKKAk8uZgioDkxKyuou4WA,7721 +numpy/random/__init__.pyi,sha256=tb8imrQCSdpOL9DxD9WTBEz_Buot9aycQ4YvUr3snhM,2284 +numpy/random/__pycache__/__init__.cpython-310.pyc,, +numpy/random/__pycache__/_pickle.cpython-310.pyc,, +numpy/random/_bounded_integers.cp310-win_amd64.lib,sha256=e9c0ZYaA52NhGFbBg09w17vAZEs1RWJ761RvVKcqItc,18000 +numpy/random/_bounded_integers.cp310-win_amd64.pyd,sha256=pJuZNBhLQw_A0oNJyh4zT-Q5mzIYYnZwivL3-ZOwZ_M,230912 +numpy/random/_bounded_integers.pxd,sha256=EOKKUlF9bh0CLNEP8TzXzX4w_xV5kivr1Putfdf6yvU,1763 +numpy/random/_common.cp310-win_amd64.lib,sha256=nhGWAstVFzn87uoVFYWWhZQevBjLhFtxJeD8Jth2YRw,2012 +numpy/random/_common.cp310-win_amd64.pyd,sha256=UzFugxoy7D-CTH9Mb9_O9FGA14eKcdNYH2QrIX3gaD0,155648 +numpy/random/_common.pxd,sha256=2_9NLWFSnLG4iDd-KeYUBRa47QM8qceUsPiAkyWZ74I,5089 +numpy/random/_examples/cffi/__pycache__/extending.cpython-310.pyc,, +numpy/random/_examples/cffi/__pycache__/parse.cpython-310.pyc,, +numpy/random/_examples/cffi/extending.py,sha256=BgydYEYBb6hDghMF-KQFVc8ssUU1F5Dg-3GyeilT3Vg,920 +numpy/random/_examples/cffi/parse.py,sha256=eRBbVrnxvw0v3BS6JJvX1rjpm1MA7yZu-31QHMuNlp4,1805 +numpy/random/_examples/cython/extending.pyx,sha256=1lkq6zFifnwaMtAkVG0i_9SbMiNqplvqnHaqUpxqNzs,2344 +numpy/random/_examples/cython/extending_distributions.pyx,sha256=myr53bzJ2kVTltZx_MDcw3Q6bbh1MK1U22GKyaEi5C8,3963 +numpy/random/_examples/cython/meson.build,sha256=q_IFcVs_qzERJD_-8uaDnjps3QdaW49okZMbFtwkAPo,1747 +numpy/random/_examples/numba/__pycache__/extending.cpython-310.pyc,, +numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-310.pyc,, +numpy/random/_examples/numba/extending.py,sha256=vnqUqQRvlAI-3VYDzIxSQDlb-smBAyj8fA1-M2IrOQw,2041 
+numpy/random/_examples/numba/extending_distributions.py,sha256=-aTxLIqnXW0XPtmEp0yJfaBTBcjEo9Q9SebKG_dOLvw,2103 +numpy/random/_generator.cp310-win_amd64.lib,sha256=XOZeqweIP05y-MvYQiJHMVMxv3mOuqsx6S6AdUTnEMc,18400 +numpy/random/_generator.cp310-win_amd64.pyd,sha256=0W5Alz1X3aiAIY4mK6-mi7VMlqvTYMqqmETm7eiQImY,714240 +numpy/random/_generator.pyi,sha256=flo5onsP6p2SECRZg08N3Ix9JWrcWEtqLzM0JGJQf0o,24865 +numpy/random/_mt19937.cp310-win_amd64.lib,sha256=zaAhnHYNDdR_oLP8t2p932F9YPMaVuBcVdJldT1VKBo,2032 +numpy/random/_mt19937.cp310-win_amd64.pyd,sha256=3MvenzPiN2F09Q8lOoKXZkZrpCEQBq-2U85WKeM6NCc,83456 +numpy/random/_mt19937.pyi,sha256=QB8vx8f-EGl-qz3iYGArFsfPb3Mgqldk128UeWX3kLs,800 +numpy/random/_pcg64.cp310-win_amd64.lib,sha256=MasJ7ToQb8mIYLdNi4twmobx-7ASXGA35sREUoegt2k,1996 +numpy/random/_pcg64.cp310-win_amd64.pyd,sha256=A0GZxipUC41Ncne7nPozGLel4Pm-SRCcPgCzm6fMMAU,93696 +numpy/random/_pcg64.pyi,sha256=TSID_lsjoPvfGIR4cbvGLg41VmbsHclheSt8pfBZPhs,1186 +numpy/random/_philox.cp310-win_amd64.lib,sha256=PqFomGutEpDnWJwrYJ9ejamW_0YlL2Jv6LYsrV8Nvic,2012 +numpy/random/_philox.cp310-win_amd64.pyd,sha256=OHcseXNz6oOFsIXIt4rp-6aw-arOkNKzEk5V9erIl_o,77824 +numpy/random/_philox.pyi,sha256=e7J93SwcbYrDfBfJgnuVIngiEn7NSN7k576J9pz4d54,1044 +numpy/random/_pickle.py,sha256=D5MrszR_oDD4yss3bt94MPw54FNIyH6f4MtOWBYRDvk,2832 +numpy/random/_pickle.pyi,sha256=V4UAI1td1JPMHeNMZjon30x7E7SD3WZBALC8HzQFciU,1651 +numpy/random/_sfc64.cp310-win_amd64.lib,sha256=ILER1DbWTME0JXJvxX6WXnUaaHUgCHu4Gv3kYT9lxpo,1996 +numpy/random/_sfc64.cp310-win_amd64.pyd,sha256=euXkv4Lq5b_Xdce_mQ6zJZfbbYwxbtYHVvEzlb9-E7Q,59904 +numpy/random/_sfc64.pyi,sha256=HCCIxo0H1b0_s5MEWrwttlElWEE5GKt5wV6LYxIvSxM,710 +numpy/random/bit_generator.cp310-win_amd64.lib,sha256=fQdqq29r_EcGxA-kihg_VmvNa3gZm8i026qNmf8fOc4,2120 +numpy/random/bit_generator.cp310-win_amd64.pyd,sha256=V4jo3HGfZpbb19o2RYHPTaz5CyRbVeihTCqDuxyzsCQ,164864 +numpy/random/bit_generator.pxd,sha256=LJpeB-EKeVV8_JO69sS33XJLZQ3DAhrUCNzs_ei7AoI,1042 
+numpy/random/bit_generator.pyi,sha256=OqHUYtl94gRrT5AV-A7iV-0QKumGfmb3jrkCoUkI4Xc,3641 +numpy/random/c_distributions.pxd,sha256=02WeqbzQ4heQ1cZ7ShePejxmt5AOI5kTstBZ5w2WxD0,6454 +numpy/random/lib/npyrandom.lib,sha256=kIVx5GKdF4rKGw8QHyQkMwId3vXECpLjQ-VqvBUxmzg,148178 +numpy/random/mtrand.cp310-win_amd64.lib,sha256=hRzqQtFi88d8rHVhn-at8GTjUH9FSx_NjePOPEiMXG0,17122 +numpy/random/mtrand.cp310-win_amd64.pyd,sha256=3_jx7eIS5p7jATKbRBNKWqvMknocL_M0EqqMsK6bK30,607744 +numpy/random/mtrand.pyi,sha256=2e8aUstFMyrLOtQSV_SwgtM_F2UzmAF-DWKZH2xRocM,22676 +numpy/random/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/tests/__pycache__/__init__.cpython-310.pyc,, +numpy/random/tests/__pycache__/test_direct.cpython-310.pyc,, +numpy/random/tests/__pycache__/test_extending.cpython-310.pyc,, +numpy/random/tests/__pycache__/test_generator_mt19937.cpython-310.pyc,, +numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-310.pyc,, +numpy/random/tests/__pycache__/test_random.cpython-310.pyc,, +numpy/random/tests/__pycache__/test_randomstate.cpython-310.pyc,, +numpy/random/tests/__pycache__/test_randomstate_regression.cpython-310.pyc,, +numpy/random/tests/__pycache__/test_regression.cpython-310.pyc,, +numpy/random/tests/__pycache__/test_seed_sequence.cpython-310.pyc,, +numpy/random/tests/__pycache__/test_smoke.cpython-310.pyc,, +numpy/random/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/tests/data/__pycache__/__init__.cpython-310.pyc,, +numpy/random/tests/data/generator_pcg64_np121.pkl.gz,sha256=EfQ-X70KkHgBAFX2pIPcCUl4MNP1ZNROaXOU75vdiqM,203 +numpy/random/tests/data/generator_pcg64_np126.pkl.gz,sha256=fN8deNVxX-HELA1eIZ32kdtYvc4hwKya6wv00GJeH0Y,208 +numpy/random/tests/data/mt19937-testset-1.csv,sha256=bA5uuOXgLpkAwJjfV8oUePg3-eyaH4-gKe8AMcl2Xn0,16845 +numpy/random/tests/data/mt19937-testset-2.csv,sha256=SnOL1nyRbblYlC254PBUSc37NguV5xN-0W_B32IxDGE,16826 
+numpy/random/tests/data/pcg64-testset-1.csv,sha256=wHoS7fIR3hMEdta7MtJ8EpIWX-Bw1yfSaVxiC15vxVs,24840 +numpy/random/tests/data/pcg64-testset-2.csv,sha256=6vlnVuW_4i6LEsVn6b40HjcBWWjoX5lboSCBDpDrzFs,24846 +numpy/random/tests/data/pcg64dxsm-testset-1.csv,sha256=Fhha5-jrCmRk__rsvx6CbDFZ7EPc8BOPDTh-myZLkhM,24834 +numpy/random/tests/data/pcg64dxsm-testset-2.csv,sha256=mNYzkCh0NMt1VvTrN08BbkpAbfkFxztNcsofgeW_0ns,24840 +numpy/random/tests/data/philox-testset-1.csv,sha256=QvpTynWHQjqTz3P2MPvtMLdg2VnM6TGTpXgp-_LeJ5g,24853 +numpy/random/tests/data/philox-testset-2.csv,sha256=-BNO1OCYtDIjnN5Q-AsQezBCGmVJUIs3qAMyj8SNtsA,24839 +numpy/random/tests/data/sfc64-testset-1.csv,sha256=sgkemW0lbKJ2wh1sBj6CfmXwFYTqfAk152P0r8emO38,24841 +numpy/random/tests/data/sfc64-testset-2.csv,sha256=mkp21SG8eCqsfNyQZdmiV41-xKcsV8eutT7rVnVEG50,24834 +numpy/random/tests/data/sfc64_np126.pkl.gz,sha256=MVa1ylFy7DUPgUBK-oIeKSdVl4UYEiN3AZ7G3sdzzaw,290 +numpy/random/tests/test_direct.py,sha256=PI1C5R_WQGagdQ65sS74o_nq3ovYSDjExIDu9r3jY7k,20536 +numpy/random/tests/test_extending.py,sha256=zZBAB6VvMh-JO6kc_Fco8C4bl-wTw_GY_BCoTg-kQ-M,4561 +numpy/random/tests/test_generator_mt19937.py,sha256=ms_yBBSkxUKT0F7kjPM-PKwTi6SZvKMnmBdYkQS8a2E,120085 +numpy/random/tests/test_generator_mt19937_regressions.py,sha256=5wlQqn6jdLwPbGNZrF3RPwLn_xRj2CCA6DY167dHN7c,8300 +numpy/random/tests/test_random.py,sha256=TW-ikZicDVgTi9WeZOQwLCCCZ_Q_gWAom6PoztXSZ5k,71901 +numpy/random/tests/test_randomstate.py,sha256=RrgFeK2r5JcD4K8paWObS8nKufdGumLN2fdnvp974kI,87399 +numpy/random/tests/test_randomstate_regression.py,sha256=8FL4sxX1D1oMVX_F9u5vR8Zazo5V0Yj4bL7zsh57V-Y,8215 +numpy/random/tests/test_regression.py,sha256=_eoEa-QIYh33tESahMHsVZtCy9W_s5T5RPzI6QYS7LY,5611 +numpy/random/tests/test_seed_sequence.py,sha256=zWUvhWDxBmTN2WteSFQeJ29W0-2k3ZUze_3YtL4Kgms,3391 +numpy/random/tests/test_smoke.py,sha256=StTxeemamKeE_H_UHQWyDxIXJSbLQI4Yr5sDp3y6ZH4,28992 +numpy/rec/__init__.py,sha256=SMM69A-UzX5LD6JxSYXO-M9t4grwzRcqSAXXuMU5PSY,85 
+numpy/rec/__init__.pyi,sha256=lPzA1S5UmKd5MvDDBb-afONgZYl0Gr3l5LxPB7Qyc_I,368 +numpy/rec/__pycache__/__init__.cpython-310.pyc,, +numpy/strings/__init__.py,sha256=NLFxhadn513TAXf8kgVguCvmyzXnP1JpVnNJtqfErX4,85 +numpy/strings/__init__.pyi,sha256=1Lax4CbaTiyckJDEl0FluWFnv7GZyOh5hxMnEVuNBmo,1390 +numpy/strings/__pycache__/__init__.cpython-310.pyc,, +numpy/testing/__init__.py,sha256=ENc09IN_D74xNvH33Z65Q2dkaSEvljHF_tz-BV-g_dU,617 +numpy/testing/__init__.pyi,sha256=hzSq3lVZ2gZbxMrQXNP3PaetjgJyKnfg50mkjTB8jXg,2147 +numpy/testing/__pycache__/__init__.cpython-310.pyc,, +numpy/testing/__pycache__/overrides.cpython-310.pyc,, +numpy/testing/__pycache__/print_coercion_tables.cpython-310.pyc,, +numpy/testing/_private/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/_private/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/_private/__pycache__/__init__.cpython-310.pyc,, +numpy/testing/_private/__pycache__/extbuild.cpython-310.pyc,, +numpy/testing/_private/__pycache__/utils.cpython-310.pyc,, +numpy/testing/_private/extbuild.py,sha256=ce56g9xEaJHUo5CqcmcpnUksdcS6tW76BNoAfGnxysg,8358 +numpy/testing/_private/extbuild.pyi,sha256=FWRL9bv2CK1FpFNLGXEJLvoZN6jgdQNnb62EENQ_u6Y,651 +numpy/testing/_private/utils.py,sha256=HU-1SLzJMa-OnJttrcLlA4UcY-FBoF7uhcxLMkNCt1s,98460 +numpy/testing/_private/utils.pyi,sha256=930ijrCmd_ZISmL4rGWSSutytCDzAiT-JJPl4fka2yY,13463 +numpy/testing/overrides.py,sha256=FRkp9cLvEwCdXWLinUH3hGf_u9SIzZk17QcRQfITZyk,2216 +numpy/testing/overrides.pyi,sha256=LMYa6hii8jPmR_eC-LHNrz3irrImvZcW29NxCkfgzNk,408 +numpy/testing/print_coercion_tables.py,sha256=BGTgZxvxnUNYqOwsceMR9xQ1LD6QUePsKLBsq8c8Vyo,6424 +numpy/testing/print_coercion_tables.pyi,sha256=O4nFjoyQ4AvDO2BrzsFi4QKaxsgmf1KDKAS-IEemPxw,848 +numpy/testing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/tests/__pycache__/__init__.cpython-310.pyc,, +numpy/testing/tests/__pycache__/test_utils.cpython-310.pyc,, 
+numpy/testing/tests/test_utils.py,sha256=xoQskILg4xhRkfHLsljkXfDHYjTtT1QkLyvNaV2KBVk,72385 +numpy/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/tests/__pycache__/__init__.cpython-310.pyc,, +numpy/tests/__pycache__/test__all__.cpython-310.pyc,, +numpy/tests/__pycache__/test_configtool.cpython-310.pyc,, +numpy/tests/__pycache__/test_ctypeslib.cpython-310.pyc,, +numpy/tests/__pycache__/test_lazyloading.cpython-310.pyc,, +numpy/tests/__pycache__/test_matlib.cpython-310.pyc,, +numpy/tests/__pycache__/test_numpy_config.cpython-310.pyc,, +numpy/tests/__pycache__/test_numpy_version.cpython-310.pyc,, +numpy/tests/__pycache__/test_public_api.cpython-310.pyc,, +numpy/tests/__pycache__/test_reloading.cpython-310.pyc,, +numpy/tests/__pycache__/test_scripts.cpython-310.pyc,, +numpy/tests/__pycache__/test_warnings.cpython-310.pyc,, +numpy/tests/test__all__.py,sha256=JziA96KUyXwWCPExbQcJBqe_RU1xQVrVwi1xhO8tzqM,230 +numpy/tests/test_configtool.py,sha256=goqOIpRq8Hrig_d6vxZGu8zluQManELhkGGDl3g9qto,1598 +numpy/tests/test_ctypeslib.py,sha256=PSiQsEpT3CoLFp56zntAEkaJJ1VMHkvE0pr8-infzKM,12728 +numpy/tests/test_lazyloading.py,sha256=vsobnlXKUfdMdqMIAZBF_DRSbYNhYF3Za4cYv-J7qHA,1196 +numpy/tests/test_matlib.py,sha256=TUaQmGoz9fvQQ8FrooTq-g9BFiViGWjoTIGQSUUF6-Y,1910 +numpy/tests/test_numpy_config.py,sha256=F0vWlR3yQyfWI3XfCxKYc6f6k3ldLDypCHbUGU_gy8E,1277 +numpy/tests/test_numpy_version.py,sha256=n4cggUNnM9okmtxwyhYBWBFwJvKpY7NzYxMgrNwRU40,1808 +numpy/tests/test_public_api.py,sha256=bn39YfPIbaVvn4cOsw7escA3F-iWLAaMKBhgeSvAXYE,28474 +numpy/tests/test_reloading.py,sha256=spEldUm_nmV0tBoUG53a2ORCOjwfltimpKfGGTqa7pI,2441 +numpy/tests/test_scripts.py,sha256=6rZN5bnGpeR4vEjLBiKEUMXJiE2NVnbY1Q8xKPlOqA8,1692 +numpy/tests/test_warnings.py,sha256=iAipwlsADKIY0BdRHd6oRv4RzOI0p0nxcqSr9DoqeLI,2422 +numpy/typing/__init__.py,sha256=rGl883L4FnRPSzNe1Zyz7_KrHvxIMobSMoLuGPPhKNI,5442 +numpy/typing/__pycache__/__init__.cpython-310.pyc,, 
+numpy/typing/__pycache__/mypy_plugin.cpython-310.pyc,, +numpy/typing/mypy_plugin.py,sha256=BJQGuyCEXpt-DSVgwiG1LQWDoXhbWTBRqDA3q8kk2wI,6669 +numpy/typing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/typing/tests/__pycache__/__init__.cpython-310.pyc,, +numpy/typing/tests/__pycache__/test_isfile.cpython-310.pyc,, +numpy/typing/tests/__pycache__/test_runtime.cpython-310.pyc,, +numpy/typing/tests/__pycache__/test_typing.cpython-310.pyc,, +numpy/typing/tests/data/fail/arithmetic.pyi,sha256=L0M86cIJJMUSkGlEzTX2M1z4R4EVpBJZr0lP5JfRxFc,4091 +numpy/typing/tests/data/fail/array_constructors.pyi,sha256=mrcArR9EVNE4-9yKg-SgVv_Yp-4DpZ1Q_0cHiRwXRtI,1163 +numpy/typing/tests/data/fail/array_like.pyi,sha256=vu6i3U8apd3ciHaB4_SQ2qcRnB1gR6mtp3-OvB3H1zM,544 +numpy/typing/tests/data/fail/array_pad.pyi,sha256=JGCMd_sRBYlsPQ2d7EfLaNooTsg1P0jBuD5Ds2MeXAg,138 +numpy/typing/tests/data/fail/arrayprint.pyi,sha256=KAbzVtw1V65ImeO4MhlejQt8yYB3mhCHwt0eqVqqoTY,602 +numpy/typing/tests/data/fail/arrayterator.pyi,sha256=tRPWjCh1-sg5FXAyYeTbHSR983JUFlecRNcustDLt4E,484 +numpy/typing/tests/data/fail/bitwise_ops.pyi,sha256=QglRyKkdf96Z-klBfGQ1JSmtOFk3yeSDFz0MqKS-rj0,604 +numpy/typing/tests/data/fail/char.pyi,sha256=m8SxJUaMSj2SWFHhjtJHj0b1KMPg7f1tXBjpPG_pEso,2781 +numpy/typing/tests/data/fail/chararray.pyi,sha256=inRqI3ZlDZ-R6Wpe4VoQnNzuO874E6SNcbzM9bz4xjw,2368 +numpy/typing/tests/data/fail/comparisons.pyi,sha256=xrNXGulq1kVRufLUB7nG95g_YNr_wR5hbIdhy0tkRMc,849 +numpy/typing/tests/data/fail/constants.pyi,sha256=3IZ6T9p4n61qIXngrHB8VqEaqloxcNmbUz3YcSqNSXI,88 +numpy/typing/tests/data/fail/datasource.pyi,sha256=mX9ucsgNXNekVFuRVzBjleA-p8GpuwpbsHqiG6a9CpA,420 +numpy/typing/tests/data/fail/dtype.pyi,sha256=ltT4BFaX_KTVdRLw2dMg3_OiSNYjDSNrXsxby6eeLTw,354 +numpy/typing/tests/data/fail/einsumfunc.pyi,sha256=dYOaJ0J4EUzdyUBikKHie99K8SMaYrlqN3R9aDcMeJ4,499 +numpy/typing/tests/data/fail/flatiter.pyi,sha256=u4-JnRsydg5BW3OcA9we8MXLJ6F5cuaxxw0BrHVA9kY,891 
+numpy/typing/tests/data/fail/fromnumeric.pyi,sha256=cN_nAgj2y2_wkErPsP1zAxG0CmHQmmeO4g7qkA9FsWY,5868 +numpy/typing/tests/data/fail/histograms.pyi,sha256=JteTXgK_kXD8UPdihMZ_T2VcM3rTBj6t-MMRP8UHvhw,379 +numpy/typing/tests/data/fail/index_tricks.pyi,sha256=63ADYRCVtf0Dapc2dJpYJZDSIXK3MhhW_1lG30d3-RY,523 +numpy/typing/tests/data/fail/lib_function_base.pyi,sha256=uvVKoZP0Mx-8V8DMCnLWoe8lk6eRT3eSAxqNFpylwEQ,2751 +numpy/typing/tests/data/fail/lib_polynomial.pyi,sha256=PM1TD9h4tFNeMp4y6HlXHKuAHDW0bfNHw0UWLUHnLVk,928 +numpy/typing/tests/data/fail/lib_utils.pyi,sha256=chR5zMEM5KI2Aw0LPIlIC8CnEcPIHwyKMLzbPhXNYXU,99 +numpy/typing/tests/data/fail/lib_version.pyi,sha256=JWtuTLcjkZpGfXshlFpJO5vINxawn9S-mxLGH0-7kcw,164 +numpy/typing/tests/data/fail/linalg.pyi,sha256=j6GGpOENz0nuZsza0Dyfy6MtjfRltqrbY8K_7g5H92I,1370 +numpy/typing/tests/data/fail/memmap.pyi,sha256=eAX-nEKtOb06mL8EPECukmL8MwrehSVRu5TBlHiSBaQ,164 +numpy/typing/tests/data/fail/modules.pyi,sha256=HYfnYNKIRwGg2caw19iqN1MDcctFMQKlE4mqoasWDaM,638 +numpy/typing/tests/data/fail/multiarray.pyi,sha256=AMsYk58-B30xQTHirBGAC6vykmauw-S7H_YiHSLOAQA,1696 +numpy/typing/tests/data/fail/ndarray.pyi,sha256=5A83TCpAmaUC0rtOU0NVG0vsNfKo_-1SF5qtVT7eqoc,415 +numpy/typing/tests/data/fail/ndarray_misc.pyi,sha256=ew0rklpnwM-57zZTCY7nczMS_tj8y7rxKTcnmjayPlU,1036 +numpy/typing/tests/data/fail/nditer.pyi,sha256=We6p5_nmfUdd_4CtwYZc5O7MTSMyM-Xw7mEUzdKPcP4,333 +numpy/typing/tests/data/fail/nested_sequence.pyi,sha256=7E1zJ2SZIF0ldbEmjtA_Bp6cV4Q-cS4Op0BJN3Vi3rc,444 +numpy/typing/tests/data/fail/npyio.pyi,sha256=CT-NXoisYmIy-WBGaZkCm8zHPCL2Ju5Moy021vnEhIU,653 +numpy/typing/tests/data/fail/numerictypes.pyi,sha256=wPJaHwMdiX1tJLdnYAgZ5z42tEhX-8EtGfWKU81czf4,125 +numpy/typing/tests/data/fail/random.pyi,sha256=v_Y-EfhC7PC8E3AH-v-AfiZVlJDSShL77WQ3yXWx5iE,2883 +numpy/typing/tests/data/fail/rec.pyi,sha256=BxH41lR1wLvLrlash9mzkPFngDAXSPQQXvuHxYylHAI,721 +numpy/typing/tests/data/fail/scalars.pyi,sha256=gN2pS35JX6MOCZTzL_1ml53510Kjr2dfVclLZrOwCpE,2951 
+numpy/typing/tests/data/fail/shape.pyi,sha256=-SzfxgevV7APDLlq-Sh8KzsKdCjHUb5GXEeJ9H6tacQ,143 +numpy/typing/tests/data/fail/shape_base.pyi,sha256=ZU1KSP0k-i-npwIMUhp42-EMzrdZhOqPEnV8ah-ZJ6U,160 +numpy/typing/tests/data/fail/stride_tricks.pyi,sha256=L0fJGun6CDq24yNdw2zeNVGGcIpEOyP2dmWj1pEbMz8,324 +numpy/typing/tests/data/fail/strings.pyi,sha256=XAiAwOERfMOL9INbER33qH-7_5rPGX4eubGcWsl36Fc,2429 +numpy/typing/tests/data/fail/testing.pyi,sha256=GYfvI1A2pB1Ii2jFVL-WGqRVimbFS2oCijmoWVbMAgw,1371 +numpy/typing/tests/data/fail/twodim_base.pyi,sha256=wzd-h1ye2BhMdIHlQ0ZcHfgYRBHVX2GJ3WGfMk5euPg,935 +numpy/typing/tests/data/fail/type_check.pyi,sha256=0KG0c2LNUbUFChTYtbJ38eJUmfvUJl4Cn5G0vh1Bkrw,392 +numpy/typing/tests/data/fail/ufunc_config.pyi,sha256=WzZzWJ-cC39qAzak3Cf--XIZX11MqwsEa3bYYyzqsvY,755 +numpy/typing/tests/data/fail/ufunclike.pyi,sha256=89Fjsr7vmurRl90mVbC5L0xOwRIk0jg4mJrgkTDn4eM,648 +numpy/typing/tests/data/fail/ufuncs.pyi,sha256=2ATU0I4ZF8DB3vyodRDJIuXnXb-CcQpt-l4Kn00kJxA,493 +numpy/typing/tests/data/fail/warnings_and_errors.pyi,sha256=4sTfiur0rV5CpjlYJC_1WV3KPnovteiImffvpYh19eU,190 +numpy/typing/tests/data/misc/extended_precision.pyi,sha256=RTsXUAM9iKX_L-iviwFVuUwKcqX9N8sRW5ZHAXjYtjc,909 +numpy/typing/tests/data/mypy.ini,sha256=TIOl-4bxGj7Q5DAYamOE_pBLnXMQf1quG7Maena9CRY,295 +numpy/typing/tests/data/pass/__pycache__/arithmetic.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/array_constructors.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/array_like.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/arrayprint.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/arrayterator.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/bitwise_ops.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/comparisons.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/dtype.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/einsumfunc.cpython-310.pyc,, 
+numpy/typing/tests/data/pass/__pycache__/flatiter.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/fromnumeric.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/index_tricks.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/lib_user_array.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/lib_utils.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/lib_version.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/literal.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/ma.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/mod.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/modules.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/multiarray.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/ndarray_conversion.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/ndarray_misc.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/ndarray_shape_manipulation.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/nditer.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/numeric.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/numerictypes.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/random.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/recfunctions.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/scalars.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/shape.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/simple.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/simple_py3.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/ufunc_config.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/ufunclike.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/ufuncs.cpython-310.pyc,, +numpy/typing/tests/data/pass/__pycache__/warnings_and_errors.cpython-310.pyc,, 
+numpy/typing/tests/data/pass/arithmetic.py,sha256=T2IizTDJ0bkGhPk5rsD5dpeEmNfPxWrVpmOB1eyY7As,8043 +numpy/typing/tests/data/pass/array_constructors.py,sha256=MGzgCt7uTeC_b7wU2aPlvTuDzXfgOujx_lR0Vqfpny8,2584 +numpy/typing/tests/data/pass/array_like.py,sha256=qLqVJLU8bjSIB3xFNCzRNAcozCWRAVLagiYYG7ewQJA,1101 +numpy/typing/tests/data/pass/arrayprint.py,sha256=NTw1gJ9v3TDVwRov4zsg_27rI-ndKuG4mDidBWEKVyc,803 +numpy/typing/tests/data/pass/arrayterator.py,sha256=z4o0H08T7tbzzMWhu5ZXdVqbivjBicuFgRHBk_lpOck,420 +numpy/typing/tests/data/pass/bitwise_ops.py,sha256=8lfjgayfTDDcWi1O-rnxLu4FZqvskvGHvFXJpMQWQgc,1095 +numpy/typing/tests/data/pass/comparisons.py,sha256=-NSAhFNN3kWqu2CZqt2pq3kflTx6nDCWxkO3JIYl5NI,3613 +numpy/typing/tests/data/pass/dtype.py,sha256=YRsTwKEQ5iJtdKCEQIybU_nL8z8Wq9hU-BZmEO7HjQE,1127 +numpy/typing/tests/data/pass/einsumfunc.py,sha256=CXdLvQsU2iDqQc7d2TRRCSwguQzJ0SJDFn23SDeOOuY,1406 +numpy/typing/tests/data/pass/flatiter.py,sha256=2xtMPvDgfhgjZIqiN3B3Wvy6Q9oBeo9uh4UkCAQNmwg,190 +numpy/typing/tests/data/pass/fromnumeric.py,sha256=bP0hEQYYQJOn7-ce0rAf8cvuxZX3Ja6GSSlCtNhEBUM,4263 +numpy/typing/tests/data/pass/index_tricks.py,sha256=RyuEtqyZVlK9j403DVjMZFd80mvt-VAMi1uGvXurc0c,1462 +numpy/typing/tests/data/pass/lib_user_array.py,sha256=K69fg9dI5BaglzpiJh13swGHuyx3LBW_zmzBBOB1aWw,612 +numpy/typing/tests/data/pass/lib_utils.py,sha256=XEc0v7bwES-C5D4GkSJQSSTSAl5ng7tq6tCWj3jxbCM,336 +numpy/typing/tests/data/pass/lib_version.py,sha256=TlLZK8sekCMm__WWo22FZfZc40zpczENf6y_TNjBpCw,317 +numpy/typing/tests/data/pass/literal.py,sha256=sWAaQyBnm3jIEZrdqWe58U2sCzeE7mUSlG8tWIcQzRc,1555 +numpy/typing/tests/data/pass/ma.py,sha256=LfK4LXCWLLK5q0c1Me8STWbhGj9b_46LYvZwXGpaEjQ,179 +numpy/typing/tests/data/pass/mod.py,sha256=L1qLwjdrRo9Tx7mxWpf_ugdKdUprDYhPRbCvQd5QjXY,1725 +numpy/typing/tests/data/pass/modules.py,sha256=buzLurat4TIGmJuW3mGsGk7dKNmpBDfQOWWQXFfb9Uc,670 +numpy/typing/tests/data/pass/multiarray.py,sha256=i6VU-VN96Q16mRGzVoY3oTE2W1z16GOGTOVFxWGRacM,1407 
+numpy/typing/tests/data/pass/ndarray_conversion.py,sha256=6TnvucV8Vtte7dGWihx7YmrHlNOanqmLJIH1W8Wok0E,1612 +numpy/typing/tests/data/pass/ndarray_misc.py,sha256=nI6loZ67OjL3Uzu0AQYsHrI-a_gq5SCzVzJqSiTKDc0,3662 +numpy/typing/tests/data/pass/ndarray_shape_manipulation.py,sha256=yaBK3hW5fe2VpvARkn_NMeF-JX-OajI8JiRWOA_Uk7Y,687 +numpy/typing/tests/data/pass/nditer.py,sha256=1wpRitCNZKCC3WJVrFSh22Z1D8jP2VxQAMtzH8NcpV8,67 +numpy/typing/tests/data/pass/numeric.py,sha256=E6JrIBZ8yaEDn4hkaePxcdYdkC6VKZUKSu_Z65Rsqkg,1720 +numpy/typing/tests/data/pass/numerictypes.py,sha256=JaCjk4zQPOI67XzqGyi3dI-GUMFM2AvDuniwzSQ7_Rk,348 +numpy/typing/tests/data/pass/random.py,sha256=wYwClLry-mN-QvaYg6AFGhwDuvoKQv-bl94fq10sL3k,63321 +numpy/typing/tests/data/pass/recfunctions.py,sha256=aeOxXwMkhc0aXyhmg4dW2QvekHDGAaYYTHVaQwrfKGY,5199 +numpy/typing/tests/data/pass/scalars.py,sha256=KfCYjDIxR9G2ypqCQJKQOuBWxiLqrGCV38q0JN3TqvA,3973 +numpy/typing/tests/data/pass/shape.py,sha256=Wr_y3KiVe5elHXLChRVupFvE_haiEFilCvk-ESR1Rcg,470 +numpy/typing/tests/data/pass/simple.py,sha256=aXvt9iCOV1lhQR11xVWgQIXXyXRHKOBfCtTjthZFtM0,2919 +numpy/typing/tests/data/pass/simple_py3.py,sha256=OBpoDmf5u4bRblugokiOZzufESsEmoU03MqipERrjLg,102 +numpy/typing/tests/data/pass/ufunc_config.py,sha256=gmMTPrq8gLXJZSBQoOpJcgzIzWgMx-k_etKPV4KSTJk,1269 +numpy/typing/tests/data/pass/ufunclike.py,sha256=jxTR61d0bmFg7JHZmw992ccRua00u4XWJYtcQRJwFS0,1172 +numpy/typing/tests/data/pass/ufuncs.py,sha256=gvdcCNoGUfN0CnQmn6k1j6ghdt8zGkJdcRcgctmU48A,438 +numpy/typing/tests/data/pass/warnings_and_errors.py,sha256=q3c1SmMwhyYLYQsLjK02AXphk3-96YltSTdTfrElJzQ,167 +numpy/typing/tests/data/reveal/arithmetic.pyi,sha256=imnR_OsLBkd_dT5Xa1N_hEJ_I4ahDmXUEpybPGsJTcg,25972 +numpy/typing/tests/data/reveal/array_api_info.pyi,sha256=zeNMHOn1HoTFaJTXkz5_GuFg3OvRa7W-gdxdJl1FPG4,3119 +numpy/typing/tests/data/reveal/array_constructors.pyi,sha256=jq0TvzyKKqLq3DWcuvhsI3oIbBtj6ikhO82S_ZWjI2I,12817 
+numpy/typing/tests/data/reveal/arraypad.pyi,sha256=viQwv8d_Hsc5nhIqC4cGkRWbmXaqf3ehutPnmOleDkY,712 +numpy/typing/tests/data/reveal/arrayprint.pyi,sha256=JZmfW3bqJWY6TUM3JDMyVBS3cSTopPkBF5O8yzD3kiU,844 +numpy/typing/tests/data/reveal/arraysetops.pyi,sha256=gA2uqkyPESPjasbca_mZ-e41XkecogZVOpRDSZpSl38,4496 +numpy/typing/tests/data/reveal/arrayterator.pyi,sha256=a0gVqOyltcfjoBqwXiouAt3ghZUwFUZo0s4xg1VZGrI,1098 +numpy/typing/tests/data/reveal/bitwise_ops.pyi,sha256=8snEpx2ci_08vZuWWHZ81KrRflUxSGvtae2_9d5uKWo,5219 +numpy/typing/tests/data/reveal/char.pyi,sha256=AyDGKchLOkZ8TFaTl7DTb1-zLCNX1bsjHlMWlJW0QRU,11065 +numpy/typing/tests/data/reveal/chararray.pyi,sha256=0ZJp_G4AzrO0r2ntt082iCbeZ2cnoB1iPB4YoFvYNuc,6787 +numpy/typing/tests/data/reveal/comparisons.pyi,sha256=kMeqY8sAwL8_joBBTMo65V1wCAfQCjppBkmhN_ax0Ds,7491 +numpy/typing/tests/data/reveal/constants.pyi,sha256=pg8eBcAYp-7Xc-5iAgzPR8c4qpo89f9Cj2LKBatsu7Q,377 +numpy/typing/tests/data/reveal/ctypeslib.pyi,sha256=42zUf_JaEvC2F8JJtCv2G2YLR51L0V_wOpxulQKBJYk,4830 +numpy/typing/tests/data/reveal/datasource.pyi,sha256=H2QtFrQWad_gRlGkTZ7subBfTHjoAhwzUCdYuS_d9C0,638 +numpy/typing/tests/data/reveal/dtype.pyi,sha256=f-Ev24OaSP6ChpI6Xn5j4VOUKGoX6Ixvs95yJFjHuf0,5353 +numpy/typing/tests/data/reveal/einsumfunc.pyi,sha256=wx3i7hQdY1mhmu6fnvGLYlN0yVByZT-fPLEMhfEkLls,1997 +numpy/typing/tests/data/reveal/emath.pyi,sha256=TC5sIisHUbcS0EDnVfokZu072bfo2qy1lF6XYQBoTeI,2391 +numpy/typing/tests/data/reveal/fft.pyi,sha256=02MXu-BxNyrIcSVBXLgC0poNsNGiXDWhWkGbY2zvZmw,1700 +numpy/typing/tests/data/reveal/flatiter.pyi,sha256=0L4ImsC4qP-bujm0czaLAMs_J57bUEUJL6CNi8L44Gw,1426 +numpy/typing/tests/data/reveal/fromnumeric.pyi,sha256=cj1x-A12dZaf7wRzFkiNzsrRpOGlSXzcLftMYeqngBg,15260 +numpy/typing/tests/data/reveal/getlimits.pyi,sha256=jp4uMJIJ2QooTSo765kdY9x1sphgankB4soyWL-CJHs,1635 +numpy/typing/tests/data/reveal/histograms.pyi,sha256=GMA2nwIztaW6nYbJ8r6wNLiQvL0V0CssJ4uB_qEfiuw,1314 
+numpy/typing/tests/data/reveal/index_tricks.pyi,sha256=mLlymGWD-8uafZ4iDEgeeIi2BwAts5_w-6koRPdI2fQ,3343 +numpy/typing/tests/data/reveal/lib_function_base.pyi,sha256=pEVGWIuBbo8ymrIybovkhyNFL8iYF7LC-01O932gltQ,10087 +numpy/typing/tests/data/reveal/lib_polynomial.pyi,sha256=5ipkWMQOnEq59oxAdHHqD-9Ion5CnfzcVc69K5y-KPs,6041 +numpy/typing/tests/data/reveal/lib_utils.pyi,sha256=hc8SEa5GX2bvey1lPdjkHeQJsGzFTbYaqvkXQWzFGZA,466 +numpy/typing/tests/data/reveal/lib_version.pyi,sha256=nAmE8-EYApx_o3Ih6XNcnGAT9gmwYn3emcH_jMjvqF0,603 +numpy/typing/tests/data/reveal/linalg.pyi,sha256=v32iuvfKkf9b68M4EzApRNb33FYYK4l2h8WtA_DYSoM,6366 +numpy/typing/tests/data/reveal/matrix.pyi,sha256=iIyDGTXx-tNFvc3_cpTsh-5qdCT1RSZG7hoHku79BlA,3122 +numpy/typing/tests/data/reveal/memmap.pyi,sha256=NzJkfVKfej4RQeVY9K-hkN_h4_5AOy6k6WtXT8FMW1U,775 +numpy/typing/tests/data/reveal/mod.pyi,sha256=gdJr_fx0lxU9ISMQMtwb1_6J0J_N5b-04mPVUPSUpxk,7792 +numpy/typing/tests/data/reveal/modules.pyi,sha256=bVHJ0-4XHxHwF4G6YFtQx4E7i9NjXJX4Xl1-2suWxL4,1922 +numpy/typing/tests/data/reveal/multiarray.pyi,sha256=EWlKpzai4sIdRSi9HX-jfOfBWQY79e2L3gdcb3TSV5A,8061 +numpy/typing/tests/data/reveal/nbit_base_example.pyi,sha256=L623ocsRjDAvFWEBnV1-D6E68v0beBDvXQ5tTDGSjaU,610 +numpy/typing/tests/data/reveal/ndarray_assignability.pyi,sha256=bWJadq5zn_6ya0mSryzYh2jmCqFS6y_5oVukAvUj-1w,2777 +numpy/typing/tests/data/reveal/ndarray_conversion.pyi,sha256=dP7RIN2JoQLyiATFYxxhH81ig76SXVCJXAxGhk7LPkE,3465 +numpy/typing/tests/data/reveal/ndarray_misc.pyi,sha256=mUys3JQqq9AjiaJ9VaEE6mJ1806iqy9Gy1um7DHGqtk,8137 +numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi,sha256=GOMRYAh2un_a9YMnhNus1icBEctWBM-JiE0XUGenADk,1444 +numpy/typing/tests/data/reveal/nditer.pyi,sha256=6sV8LI8D5F9nNWwbcLBQSWubRbnIZ-5EVveHi3G_bJs,1984 +numpy/typing/tests/data/reveal/nested_sequence.pyi,sha256=IHoDdBHrPR9t7Hlu5uS7Gwmdhpry1vnJxjoZYACWbjk,674 +numpy/typing/tests/data/reveal/npyio.pyi,sha256=fW3S6E7_Lc290CD_Cu_nwLiA7rDzyZTWsNp7opI5Ono,3608 
+numpy/typing/tests/data/reveal/numeric.pyi,sha256=1viOAuH2hv0NietbvxyVEpeQJacMqy7ck-mQpRHxEB4,6218 +numpy/typing/tests/data/reveal/numerictypes.pyi,sha256=UfImexHrS0BEJIHdtdHnDCFaeokhEKtA08_ArfhsXus,1414 +numpy/typing/tests/data/reveal/polynomial_polybase.pyi,sha256=lNwOtFZ9wv3CIuPJwnwUNHyAStBv97LP423erNq90X8,8220 +numpy/typing/tests/data/reveal/polynomial_polyutils.pyi,sha256=BJX4_LBPqbPQ1eY-br4SqHUHO9zvIrOxa_J2TKrufWg,10984 +numpy/typing/tests/data/reveal/polynomial_series.pyi,sha256=NMH4MqVHDNNI1mLewAZg9Zin5EoDoYik_Q-YhbQa5gk,7268 +numpy/typing/tests/data/reveal/random.pyi,sha256=8llrjYb6sRt7qblJiOniDKCrC1ERE3JNfenqIPX-2UI,105880 +numpy/typing/tests/data/reveal/rec.pyi,sha256=sv6b2EJApQUcucb205S_GBtPVd675T_8VgdeVn78jqY,3944 +numpy/typing/tests/data/reveal/scalars.pyi,sha256=Ch1zywWqKdpyvTJDRGRdfzZ6to-VxWI512MuNwnY00M,6642 +numpy/typing/tests/data/reveal/shape.pyi,sha256=M0joDPodElAHLjI9FmofIgS45uSidjXOnfpbKwyBaZ8,307 +numpy/typing/tests/data/reveal/shape_base.pyi,sha256=tn2gaV-VfJLPp18NH3qGxXVoOZXm4nZj4pKrF5IonlE,2100 +numpy/typing/tests/data/reveal/stride_tricks.pyi,sha256=YFA73zIg8YRoDnDi6VKc-3CRZoOJZ_YhNlqiyqrthLE,1374 +numpy/typing/tests/data/reveal/strings.pyi,sha256=TSRS1RV7KdjVMjfAwJr_ja60et1LqWWrdD-hop_mikQ,9608 +numpy/typing/tests/data/reveal/testing.pyi,sha256=Ym8uuMNq1l6VqZsN43ghEL8DpZKFX4kisH-BV79xFNU,8683 +numpy/typing/tests/data/reveal/twodim_base.pyi,sha256=g-fZ2g9vh4XYIltv2sSgsAtl0yg_EwW3NdhKIFFVG6o,4451 +numpy/typing/tests/data/reveal/type_check.pyi,sha256=i-8YsIOGxgxdleMzwO8ctaLfYzcnBUKDBY7TiwK-IHA,2790 +numpy/typing/tests/data/reveal/ufunc_config.pyi,sha256=w4XdMT8Rz2pu6XLmVhb-F4hnCb--A_dYYjBJCtWVZ5s,1222 +numpy/typing/tests/data/reveal/ufunclike.pyi,sha256=7-LodGrHthBQuR42rQlXw3nQsVqHGYdxzu73B2ps1tY,1266 +numpy/typing/tests/data/reveal/ufuncs.pyi,sha256=mSu8DtwP2b-l45nrxWwKiNGeALQbG6qAKBYg_OLOqWc,4944 +numpy/typing/tests/data/reveal/warnings_and_errors.pyi,sha256=TqQe7189p4B7PAbuIQbGNkLykuCpq2ngCB2Bscsh_bw,471 
+numpy/typing/tests/test_isfile.py,sha256=slpVB1kHtrG5unlgYxl94Q_kOzDBPnDtFZQhLZdq9JM,897 +numpy/typing/tests/test_runtime.py,sha256=p-Ydvt0Rt6mPHmAKYOOAGxxXQnjoARJSVZmViKMAX0A,3384 +numpy/typing/tests/test_typing.py,sha256=ZVc9wJgtAKRX6S1lkSiR6Y9w_Dxwl0TLN-rAvzJBSFw,8594 +numpy/version.py,sha256=HxgbOvcF3g3JX8sI0ljw0nI02aECprcAr4N8JUULIpY,304 +numpy/version.pyi,sha256=WPYF3zFF92LnJu7CGTRsh4osMyXBuQRpMvAuoxKMrbw,408 diff --git a/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/WHEEL b/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..148e77b3f434fe39f597b0af17ecb80a5b72fd10 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: meson +Root-Is-Purelib: false +Tag: cp310-cp310-win_amd64 \ No newline at end of file diff --git a/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/entry_points.txt b/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..963c00f7069bbcd2075093df390c8bfd73a109ce --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy-2.2.6.dist-info/entry_points.txt @@ -0,0 +1,10 @@ +[array_api] +numpy = numpy + +[pyinstaller40] +hook-dirs = numpy:_pyinstaller_hooks_dir + +[console_scripts] +f2py = numpy.f2py.f2py2e:main +numpy-config = numpy._configtool:main + diff --git a/pythonProject/.venv/Lib/site-packages/numpy.libs/msvcp140-263139962577ecda4cd9469ca360a746.dll b/pythonProject/.venv/Lib/site-packages/numpy.libs/msvcp140-263139962577ecda4cd9469ca360a746.dll new file mode 100644 index 0000000000000000000000000000000000000000..e71a456c461eebca84058e6f2f8b5002f689dd08 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy.libs/msvcp140-263139962577ecda4cd9469ca360a746.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:a4c2229bdc2a2a630acdc095b4d86008e5c3e3bc7773174354f3da4f5beb9cde +size 575056 diff --git a/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/__config__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/__config__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84336a507699724a0e22c197c2e8eed14207d60b Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/__config__.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33358527e4807029fd628e7664c373a1beff9e6b Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/__init__.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_array_api_info.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_array_api_info.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f4802fd91eb35143e8f92eab9697440154b2e67 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_array_api_info.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_configtool.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_configtool.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23a8c0e6d162eac84480a8352b3688b3c0fac176 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_configtool.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_distributor_init.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_distributor_init.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f4aee73ce753d32c2fbc6af854f8fa66863ce85c Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_distributor_init.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_expired_attrs_2_0.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_expired_attrs_2_0.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e257c452ed309f79a814c8d61f3859c610324bda Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_expired_attrs_2_0.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_globals.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_globals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..558cf66577fe4d0a75aa34c3f650c583519720ae Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_globals.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_pytesttester.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_pytesttester.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28d44931385a2120cec6ddeca941ec6165cb13ce Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/_pytesttester.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/conftest.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bd00ab3b61104fe60b484662493f8c8b404c037 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/conftest.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/ctypeslib.cpython-310.pyc 
b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/ctypeslib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15c6b48f5e0ddc49b8edafaab3852d75cc9e16eb Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/ctypeslib.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/dtypes.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/dtypes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d38346dbf409e1b3760acc63ae99541b1ee3f84f Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/dtypes.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/exceptions.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c217f608793fa68c112b0399fd11eb12409fd5dc Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/exceptions.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/matlib.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/matlib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94bd944da8f441d4b6dd68ef08e09c0473f4640d Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/matlib.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/version.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..562bee3dd594b7fe2e1834325b0ce8d0614c1395 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/__pycache__/version.cpython-310.pyc differ diff --git 
a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..718fa90e9f205c38b5715ab4dd844326291c27f1 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/__init__.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da658b0c94f47e3f2e8146449fb740185f5ceeed Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_asarray.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_asarray.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..caaa5717c0a568fc6344711a70e92d8a9e3ba0af Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_asarray.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_dtype.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_dtype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebc3e2dcc279c4a1a21d19359debd1a21541c640 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_dtype.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_dtype_ctypes.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_dtype_ctypes.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..c7a28f83f38d4ce8dae1fab22c790c4279231039 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_dtype_ctypes.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_exceptions.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d1cb266fd2159587cd3abd27fcbe33541659823 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_exceptions.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_internal.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_internal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a826a461c5d95ce3d4e58820f9db633068043b52 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_internal.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_machar.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_machar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0364677a59510ed38700ee9e2bdf82856175592a Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_machar.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_methods.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_methods.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e764c891a13e98a75915b7da90cc4c484cb343a Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_methods.cpython-310.pyc differ diff --git 
a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_string_helpers.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_string_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56ed6efed5996c656b2c332d6d5f537a04ea1893 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_string_helpers.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_type_aliases.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_type_aliases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33c895f6c13807fa2f10cec3777c97d7af2da0a1 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_type_aliases.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_ufunc_config.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_ufunc_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b6295a263c4599a8fbe870b12de974a6664cb24 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/_ufunc_config.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/arrayprint.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/arrayprint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4ce290802a396236ba0988da5d5465f6036a67f Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/arrayprint.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/cversions.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/cversions.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..885ae0335987020f014c56a8100eae163d91c005 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/cversions.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/defchararray.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/defchararray.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b02b2a6109b11e82cef18d8b656196d1d98f8d8 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/defchararray.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/einsumfunc.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/einsumfunc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a5914cc1851e50430a716bfb30dc5944137db5a Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/einsumfunc.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/function_base.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/function_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf2ccc06d8da2cba906da20d7fda7a91eb82f9a4 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/function_base.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/getlimits.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/getlimits.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..461b73d11ea170ff00afed9318850fdd5bac06cb Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/getlimits.cpython-310.pyc differ diff --git 
a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/memmap.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/memmap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9290c908100c40f2a1c26ca9cdcdbad3ebb0b537 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/memmap.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/multiarray.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/multiarray.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb7b4c9343f48d87a6975a893cbba798e7f6207b Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/multiarray.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/numeric.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/numeric.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17a2d4dfc26291c8e23dccf393365b6f3d3c8691 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/numeric.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/numerictypes.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/numerictypes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6111b51bae33693950124de69d08828426f73447 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/numerictypes.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/overrides.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/overrides.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed3b9ab1420769536bc3ee170f070635c96cbf65 Binary files 
/dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/overrides.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/printoptions.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/printoptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f365cd4e9247eb3e3e12d871c481bb41a4783490 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/printoptions.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/records.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/records.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa02793566e02a72e5ea6758ecbc70782ac6384a Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/records.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/shape_base.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/shape_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aafd61dfc575a96734b5d2c749295ae4b5b291ab Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/shape_base.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/strings.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/strings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7260b2740696e64944157e22a471587274a4bbc2 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/strings.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/umath.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/umath.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..5205607aeb4448284e2c5ce7d7a8abd7a8bdca39 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/__pycache__/umath.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/_locales.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/_locales.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba20bb66a2a980e4090d50dfb9005f282e300563 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/_locales.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/_natype.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/_natype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f0622f32eb7d90bbdb622b83fa75b5f4bb99ccd Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/_natype.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test__exceptions.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test__exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b011342dd12c7b3113d23e971ae2834adfe8240 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test__exceptions.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_records.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_records.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c851ba420249a1cf9d5b54b4d869749fa6a0d17 Binary files /dev/null and 
b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_records.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_regression.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_regression.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a87e551fd62e63d762f636c7cad3e6b0aac2cf63 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_regression.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b30675a80c4a3d7654959f0b67e56ac2caf46951c6a848185e542e66e99659a2 +size 101103 diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalar_ctors.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalar_ctors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d9f2769cd7550ba4283b770865a4b6f637af9fc Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalar_ctors.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalar_methods.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalar_methods.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0ae745e0500d56c8fc6316e762b6c1a02ad3de7 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalar_methods.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalarbuffer.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalarbuffer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64d189372ba4b8e9d6c49ef7344bf98b57df9ec5 Binary files /dev/null and 
b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalarbuffer.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalarinherit.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalarinherit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbb91524a880e3a0be03350a9c7ce395f4f66777 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalarinherit.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalarmath.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalarmath.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d791d8dc1b37716607222865cb75c3d4c8e49eb Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalarmath.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalarprint.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalarprint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..064e326299061797012319cc9f6b1ee6f8a1c8b1 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_scalarprint.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_shape_base.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_shape_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01935f24b1aa565000d921597947a5953e29ac67 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_shape_base.cpython-310.pyc differ diff --git 
a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_simd.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_simd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c171e7d60dd46f268959c5c55399766cbd34dd8d Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_simd.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_simd_module.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_simd_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bad6c9f823a411230a670b4b6eb12b8ebe72e43 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_simd_module.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_stringdtype.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_stringdtype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9fc341a368902d514a90938c6600ea5aa26d71d Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_stringdtype.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_strings.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_strings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..465dacaee5e67f5ed3dd5ced98c787ee04fc8bbe Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_strings.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_umath_accuracy.cpython-310.pyc 
b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_umath_accuracy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..702613d9393d4e017cf423413ef9c59526510e32 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_umath_accuracy.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_umath_complex.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_umath_complex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4613825af88b1fee59b7096cda6714905c215866 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_umath_complex.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_unicode.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_unicode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab76d8206c848a6df53ed54afb00ccce05bfe97a Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_core/tests/__pycache__/test_unicode.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/__init__.py b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/__init__.pyi b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/__pycache__/__init__.cpython-310.pyc 
b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cccf2434f92b31662da95ed7ce06cac2c8657021 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/__pycache__/__init__.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/__pycache__/hook-numpy.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/__pycache__/hook-numpy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2c8766b9fcf89f35053b9baeb5e9c9f7aff471b Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/__pycache__/hook-numpy.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/hook-numpy.py b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/hook-numpy.py new file mode 100644 index 0000000000000000000000000000000000000000..babb5a67f0796f132db824af0397f72f19251df3 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/hook-numpy.py @@ -0,0 +1,36 @@ +"""This hook should collect all binary files and any hidden modules that numpy +needs. + +Our (some-what inadequate) docs for writing PyInstaller hooks are kept here: +https://pyinstaller.readthedocs.io/en/stable/hooks.html + +""" +from PyInstaller.compat import is_conda, is_pure_conda +from PyInstaller.utils.hooks import collect_dynamic_libs, is_module_satisfies + +# Collect all DLLs inside numpy's installation folder, dump them into built +# app's root. +binaries = collect_dynamic_libs("numpy", ".") + +# If using Conda without any non-conda virtual environment manager: +if is_pure_conda: + # Assume running the NumPy from Conda-forge and collect it's DLLs from the + # communal Conda bin directory. DLLs from NumPy's dependencies must also be + # collected to capture MKL, OpenBlas, OpenMP, etc. 
+ from PyInstaller.utils.hooks import conda_support + datas = conda_support.collect_dynamic_libs("numpy", dependencies=True) + +# Submodules PyInstaller cannot detect. `_dtype_ctypes` is only imported +# from C and `_multiarray_tests` is used in tests (which are not packed). +hiddenimports = ['numpy._core._dtype_ctypes', 'numpy._core._multiarray_tests'] + +# Remove testing and building code and packages that are referenced throughout +# NumPy but are not really dependencies. +excludedimports = [ + "scipy", + "pytest", + "f2py", + "setuptools", + "distutils", + "numpy.distutils", +] diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/hook-numpy.pyi b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/hook-numpy.pyi new file mode 100644 index 0000000000000000000000000000000000000000..2978b8be623b3dbdbaec791bb0f430ba0f2fb460 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/hook-numpy.pyi @@ -0,0 +1,13 @@ +from typing import Final + +# from `PyInstaller.compat` +is_conda: Final[bool] +is_pure_conda: Final[bool] + +# from `PyInstaller.utils.hooks` +def is_module_satisfies(requirements: str, version: None = None, version_attr: None = None) -> bool: ... 
+ +binaries: Final[list[tuple[str, str]]] + +hiddenimports: Final[list[str]] +excludedimports: Final[list[str]] diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/__init__.py b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c4392946a64fd3746f010035c57f6dde9f71eac8 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/__init__.py @@ -0,0 +1,16 @@ +from numpy.testing import IS_WASM, IS_EDITABLE +import pytest + + +if IS_WASM: + pytest.skip( + "WASM/Pyodide does not use or support Fortran", + allow_module_level=True + ) + + +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5937d33450cef87af229b61485b3d91eb337380 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/__pycache__/pyinstaller-smoke.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/__pycache__/pyinstaller-smoke.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b714e0a2f0036c85a8982d8c36fda5678d8b1f4 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/__pycache__/pyinstaller-smoke.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/__pycache__/test_pyinstaller.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/__pycache__/test_pyinstaller.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..6a4a8266ab78afaa43e78bce7f64a5288cf93c72 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/__pycache__/test_pyinstaller.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/pyinstaller-smoke.py b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/pyinstaller-smoke.py new file mode 100644 index 0000000000000000000000000000000000000000..99b8358ba04897cc0d2a534701648df742a733dc --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/pyinstaller-smoke.py @@ -0,0 +1,32 @@ +"""A crude *bit of everything* smoke test to verify PyInstaller compatibility. + +PyInstaller typically goes wrong by forgetting to package modules, extension +modules or shared libraries. This script should aim to touch as many of those +as possible in an attempt to trip a ModuleNotFoundError or a DLL load failure +due to an uncollected resource. Missing resources are unlikely to lead to +arithmetic errors so there's generally no need to verify any calculation's +output - merely that it made it to the end OK. This script should not +explicitly import any of numpy's submodules as that gives PyInstaller undue +hints that those submodules exist and should be collected (accessing implicitly +loaded submodules is OK). 
+ +""" +import numpy as np + +a = np.arange(1., 10.).reshape((3, 3)) % 5 +np.linalg.det(a) +a @ a +a @ a.T +np.linalg.inv(a) +np.sin(np.exp(a)) +np.linalg.svd(a) +np.linalg.eigh(a) + +np.unique(np.random.randint(0, 10, 100)) +np.sort(np.random.uniform(0, 10, 100)) + +np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) +np.ma.masked_array(np.arange(10), np.random.rand(10) < .5).sum() +np.polynomial.Legendre([7, 8, 9]).roots() + +print("I made it!") diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/test_pyinstaller.py b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/test_pyinstaller.py new file mode 100644 index 0000000000000000000000000000000000000000..d8d29ebbb94944c06fdf87962aecd50a6b4945d5 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_pyinstaller/tests/test_pyinstaller.py @@ -0,0 +1,35 @@ +import subprocess +from pathlib import Path + +import pytest + + +# PyInstaller has been very unproactive about replacing 'imp' with 'importlib'. +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +# It also leaks io.BytesIO()s. +@pytest.mark.filterwarnings('ignore::ResourceWarning') +@pytest.mark.parametrize("mode", ["--onedir", "--onefile"]) +@pytest.mark.slow +def test_pyinstaller(mode, tmp_path): + """Compile and run pyinstaller-smoke.py using PyInstaller.""" + + pyinstaller_cli = pytest.importorskip("PyInstaller.__main__").run + + source = Path(__file__).with_name("pyinstaller-smoke.py").resolve() + args = [ + # Place all generated files in ``tmp_path``. + '--workpath', str(tmp_path / "build"), + '--distpath', str(tmp_path / "dist"), + '--specpath', str(tmp_path), + mode, + str(source), + ] + pyinstaller_cli(args) + + if mode == "--onefile": + exe = tmp_path / "dist" / source.stem + else: + exe = tmp_path / "dist" / source.stem / source.stem + + p = subprocess.run([str(exe)], check=True, stdout=subprocess.PIPE) + assert p.stdout.strip() == b"I made it!" 
diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/__init__.py b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..67ddb9badfd6bd9f3777ad24084bd539de0906c9 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__init__.py @@ -0,0 +1,154 @@ +"""Private counterpart of ``numpy.typing``.""" + +from __future__ import annotations + +from ._nested_sequence import ( + _NestedSequence as _NestedSequence, +) +from ._nbit_base import ( + NBitBase as NBitBase, + _8Bit as _8Bit, + _16Bit as _16Bit, + _32Bit as _32Bit, + _64Bit as _64Bit, + _80Bit as _80Bit, + _96Bit as _96Bit, + _128Bit as _128Bit, + _256Bit as _256Bit, +) +from ._nbit import ( + _NBitByte as _NBitByte, + _NBitShort as _NBitShort, + _NBitIntC as _NBitIntC, + _NBitIntP as _NBitIntP, + _NBitInt as _NBitInt, + _NBitLong as _NBitLong, + _NBitLongLong as _NBitLongLong, + _NBitHalf as _NBitHalf, + _NBitSingle as _NBitSingle, + _NBitDouble as _NBitDouble, + _NBitLongDouble as _NBitLongDouble, +) +from ._char_codes import ( + _BoolCodes as _BoolCodes, + _UInt8Codes as _UInt8Codes, + _UInt16Codes as _UInt16Codes, + _UInt32Codes as _UInt32Codes, + _UInt64Codes as _UInt64Codes, + _Int8Codes as _Int8Codes, + _Int16Codes as _Int16Codes, + _Int32Codes as _Int32Codes, + _Int64Codes as _Int64Codes, + _Float16Codes as _Float16Codes, + _Float32Codes as _Float32Codes, + _Float64Codes as _Float64Codes, + _Complex64Codes as _Complex64Codes, + _Complex128Codes as _Complex128Codes, + _ByteCodes as _ByteCodes, + _ShortCodes as _ShortCodes, + _IntCCodes as _IntCCodes, + _IntPCodes as _IntPCodes, + _IntCodes as _IntCodes, + _LongCodes as _LongCodes, + _LongLongCodes as _LongLongCodes, + _UByteCodes as _UByteCodes, + _UShortCodes as _UShortCodes, + _UIntCCodes as _UIntCCodes, + _UIntPCodes as _UIntPCodes, + _UIntCodes as _UIntCodes, + _ULongCodes as _ULongCodes, + _ULongLongCodes as _ULongLongCodes, + _HalfCodes as 
_HalfCodes, + _SingleCodes as _SingleCodes, + _DoubleCodes as _DoubleCodes, + _LongDoubleCodes as _LongDoubleCodes, + _CSingleCodes as _CSingleCodes, + _CDoubleCodes as _CDoubleCodes, + _CLongDoubleCodes as _CLongDoubleCodes, + _DT64Codes as _DT64Codes, + _TD64Codes as _TD64Codes, + _StrCodes as _StrCodes, + _BytesCodes as _BytesCodes, + _VoidCodes as _VoidCodes, + _ObjectCodes as _ObjectCodes, + _StringCodes as _StringCodes, + _UnsignedIntegerCodes as _UnsignedIntegerCodes, + _SignedIntegerCodes as _SignedIntegerCodes, + _IntegerCodes as _IntegerCodes, + _FloatingCodes as _FloatingCodes, + _ComplexFloatingCodes as _ComplexFloatingCodes, + _InexactCodes as _InexactCodes, + _NumberCodes as _NumberCodes, + _CharacterCodes as _CharacterCodes, + _FlexibleCodes as _FlexibleCodes, + _GenericCodes as _GenericCodes, +) +from ._scalars import ( + _CharLike_co as _CharLike_co, + _BoolLike_co as _BoolLike_co, + _UIntLike_co as _UIntLike_co, + _IntLike_co as _IntLike_co, + _FloatLike_co as _FloatLike_co, + _ComplexLike_co as _ComplexLike_co, + _TD64Like_co as _TD64Like_co, + _NumberLike_co as _NumberLike_co, + _ScalarLike_co as _ScalarLike_co, + _VoidLike_co as _VoidLike_co, +) +from ._shape import ( + _Shape as _Shape, + _ShapeLike as _ShapeLike, +) +from ._dtype_like import ( + DTypeLike as DTypeLike, + _DTypeLike as _DTypeLike, + _SupportsDType as _SupportsDType, + _VoidDTypeLike as _VoidDTypeLike, + _DTypeLikeBool as _DTypeLikeBool, + _DTypeLikeUInt as _DTypeLikeUInt, + _DTypeLikeInt as _DTypeLikeInt, + _DTypeLikeFloat as _DTypeLikeFloat, + _DTypeLikeComplex as _DTypeLikeComplex, + _DTypeLikeTD64 as _DTypeLikeTD64, + _DTypeLikeDT64 as _DTypeLikeDT64, + _DTypeLikeObject as _DTypeLikeObject, + _DTypeLikeVoid as _DTypeLikeVoid, + _DTypeLikeStr as _DTypeLikeStr, + _DTypeLikeBytes as _DTypeLikeBytes, + _DTypeLikeComplex_co as _DTypeLikeComplex_co, +) +from ._array_like import ( + NDArray as NDArray, + ArrayLike as ArrayLike, + _ArrayLike as _ArrayLike, + _ArrayLikeInt as 
_ArrayLikeInt, + _ArrayLikeBool_co as _ArrayLikeBool_co, + _ArrayLikeUInt_co as _ArrayLikeUInt_co, + _ArrayLikeInt_co as _ArrayLikeInt_co, + _ArrayLikeFloat_co as _ArrayLikeFloat_co, + _ArrayLikeFloat64_co as _ArrayLikeFloat64_co, + _ArrayLikeComplex_co as _ArrayLikeComplex_co, + _ArrayLikeComplex128_co as _ArrayLikeComplex128_co, + _ArrayLikeNumber_co as _ArrayLikeNumber_co, + _ArrayLikeTD64_co as _ArrayLikeTD64_co, + _ArrayLikeDT64_co as _ArrayLikeDT64_co, + _ArrayLikeObject_co as _ArrayLikeObject_co, + _ArrayLikeVoid_co as _ArrayLikeVoid_co, + _ArrayLikeStr_co as _ArrayLikeStr_co, + _ArrayLikeBytes_co as _ArrayLikeBytes_co, + _ArrayLikeString_co as _ArrayLikeString_co, + _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, + _ArrayLikeUnknown as _ArrayLikeUnknown, + _FiniteNestedSequence as _FiniteNestedSequence, + _SupportsArray as _SupportsArray, + _SupportsArrayFunc as _SupportsArrayFunc, + _UnknownType as _UnknownType, +) + +from ._ufunc import ( + _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1, + _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, + _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, + _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1, +) diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..081696c1b148619f8d4de663ba972308ea561601 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/__init__.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_add_docstring.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_add_docstring.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f45fcdfde84659a4cd418094e9ebaa07ba0c0d89 Binary files /dev/null and 
b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_add_docstring.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_array_like.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_array_like.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d28b6b1fb200dfa2b48dc41094f8f4fe61c0de0 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_array_like.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_char_codes.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_char_codes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ff860d7076292e1f79cecb2127f2f73f8b8f5a3 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_char_codes.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_dtype_like.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_dtype_like.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c3eea79c86b24a7858978ee663b183b90196723 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_dtype_like.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_extended_precision.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_extended_precision.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..782d953983e1b4b719a40ccd4ce0226cad11d448 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_extended_precision.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_nbit.cpython-310.pyc 
b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_nbit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bf15be945d1206bc361cb3ab2f392b19c7ba5a1 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_nbit.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_nbit_base.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_nbit_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50b3d3f5e273bf5192e28004c642d16c5c7e288c Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_nbit_base.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_nested_sequence.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_nested_sequence.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01d2320f77ba441d4bbbbaf13003a75179c38f85 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_nested_sequence.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_scalars.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_scalars.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e414c6e87bfa3fa8eff2188ae9de8752a1bc2d5d Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_scalars.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_shape.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_shape.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a4c51d124b1863d9f843587238027a797e06ba9 Binary files /dev/null and 
b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_shape.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_ufunc.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_ufunc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37051ef6a0288cd5f423772f0313e97d9d86811c Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_typing/__pycache__/_ufunc.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/_add_docstring.py b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_add_docstring.py new file mode 100644 index 0000000000000000000000000000000000000000..b12ad5afe9060c1338dcb9c89642d9acd62c8ebf --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_add_docstring.py @@ -0,0 +1,153 @@ +"""A module for creating docstrings for sphinx ``data`` domains.""" + +import re +import textwrap + +from ._array_like import NDArray + +_docstrings_list = [] + + +def add_newdoc(name: str, value: str, doc: str) -> None: + """Append ``_docstrings_list`` with a docstring for `name`. + + Parameters + ---------- + name : str + The name of the object. + value : str + A string-representation of the object. + doc : str + The docstring of the object. + + """ + _docstrings_list.append((name, value, doc)) + + +def _parse_docstrings() -> str: + """Convert all docstrings in ``_docstrings_list`` into a single + sphinx-legible text block. + + """ + type_list_ret = [] + for name, value, doc in _docstrings_list: + s = textwrap.dedent(doc).replace("\n", "\n ") + + # Replace sections by rubrics + lines = s.split("\n") + new_lines = [] + indent = "" + for line in lines: + m = re.match(r'^(\s+)[-=]+\s*$', line) + if m and new_lines: + prev = textwrap.dedent(new_lines.pop()) + if prev == "Examples": + indent = "" + new_lines.append(f'{m.group(1)}.. 
rubric:: {prev}') + else: + indent = 4 * " " + new_lines.append(f'{m.group(1)}.. admonition:: {prev}') + new_lines.append("") + else: + new_lines.append(f"{indent}{line}") + + s = "\n".join(new_lines) + s_block = f""".. data:: {name}\n :value: {value}\n {s}""" + type_list_ret.append(s_block) + return "\n".join(type_list_ret) + + +add_newdoc('ArrayLike', 'typing.Union[...]', + """ + A `~typing.Union` representing objects that can be coerced + into an `~numpy.ndarray`. + + Among others this includes the likes of: + + * Scalars. + * (Nested) sequences. + * Objects implementing the `~class.__array__` protocol. + + .. versionadded:: 1.20 + + See Also + -------- + :term:`array_like`: + Any scalar or sequence that can be interpreted as an ndarray. + + Examples + -------- + .. code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> def as_array(a: npt.ArrayLike) -> np.ndarray: + ... return np.array(a) + + """) + +add_newdoc('DTypeLike', 'typing.Union[...]', + """ + A `~typing.Union` representing objects that can be coerced + into a `~numpy.dtype`. + + Among others this includes the likes of: + + * :class:`type` objects. + * Character codes or the names of :class:`type` objects. + * Objects with the ``.dtype`` attribute. + + .. versionadded:: 1.20 + + See Also + -------- + :ref:`Specifying and constructing data types ` + A comprehensive overview of all objects that can be coerced + into data types. + + Examples + -------- + .. code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> def as_dtype(d: npt.DTypeLike) -> np.dtype: + ... return np.dtype(d) + + """) + +add_newdoc('NDArray', repr(NDArray), + """ + A `np.ndarray[tuple[int, ...], np.dtype[+ScalarType]] ` + type alias :term:`generic ` w.r.t. its + `dtype.type `. + + Can be used during runtime for typing arrays with a given dtype + and unspecified shape. + + .. versionadded:: 1.21 + + Examples + -------- + .. 
code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> print(npt.NDArray) + numpy.ndarray[tuple[int, ...], numpy.dtype[+_ScalarType_co]] + + >>> print(npt.NDArray[np.float64]) + numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.float64]] + + >>> NDArrayInt = npt.NDArray[np.int_] + >>> a: NDArrayInt = np.arange(10) + + >>> def func(a: npt.ArrayLike) -> npt.NDArray[Any]: + ... return np.array(a) + + """) + +_docstrings = _parse_docstrings() diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/_array_like.py b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_array_like.py new file mode 100644 index 0000000000000000000000000000000000000000..9db1fe46be56070dae20ee7d0bc078580be0ae28 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_array_like.py @@ -0,0 +1,192 @@ +from __future__ import annotations + +import sys +from collections.abc import Collection, Callable, Sequence +from typing import Any, Protocol, TypeAlias, TypeVar, runtime_checkable, TYPE_CHECKING + +import numpy as np +from numpy import ( + ndarray, + dtype, + generic, + unsignedinteger, + integer, + floating, + complexfloating, + number, + timedelta64, + datetime64, + object_, + void, + str_, + bytes_, +) +from ._nbit_base import _32Bit, _64Bit +from ._nested_sequence import _NestedSequence +from ._shape import _Shape + +if TYPE_CHECKING: + StringDType = np.dtypes.StringDType +else: + # at runtime outside of type checking importing this from numpy.dtypes + # would lead to a circular import + from numpy._core.multiarray import StringDType + +_T = TypeVar("_T") +_ScalarType = TypeVar("_ScalarType", bound=generic) +_ScalarType_co = TypeVar("_ScalarType_co", bound=generic, covariant=True) +_DType = TypeVar("_DType", bound=dtype[Any]) +_DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any]) + +NDArray: TypeAlias = ndarray[_Shape, dtype[_ScalarType_co]] + +# The `_SupportsArray` protocol only cares about the default dtype +# (i.e. 
`dtype=None` or no `dtype` parameter at all) of the to-be returned +# array. +# Concrete implementations of the protocol are responsible for adding +# any and all remaining overloads +@runtime_checkable +class _SupportsArray(Protocol[_DType_co]): + def __array__(self) -> ndarray[Any, _DType_co]: ... + + +@runtime_checkable +class _SupportsArrayFunc(Protocol): + """A protocol class representing `~class.__array_function__`.""" + def __array_function__( + self, + func: Callable[..., Any], + types: Collection[type[Any]], + args: tuple[Any, ...], + kwargs: dict[str, Any], + ) -> object: ... + + +# TODO: Wait until mypy supports recursive objects in combination with typevars +_FiniteNestedSequence: TypeAlias = ( + _T + | Sequence[_T] + | Sequence[Sequence[_T]] + | Sequence[Sequence[Sequence[_T]]] + | Sequence[Sequence[Sequence[Sequence[_T]]]] +) + +# A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic` +_ArrayLike: TypeAlias = ( + _SupportsArray[dtype[_ScalarType]] + | _NestedSequence[_SupportsArray[dtype[_ScalarType]]] +) + +# A union representing array-like objects; consists of two typevars: +# One representing types that can be parametrized w.r.t. `np.dtype` +# and another one for the rest +_DualArrayLike: TypeAlias = ( + _SupportsArray[_DType] + | _NestedSequence[_SupportsArray[_DType]] + | _T + | _NestedSequence[_T] +) + +if sys.version_info >= (3, 12): + from collections.abc import Buffer as _Buffer +else: + @runtime_checkable + class _Buffer(Protocol): + def __buffer__(self, flags: int, /) -> memoryview: ... 
+ +ArrayLike: TypeAlias = _Buffer | _DualArrayLike[ + dtype[Any], + bool | int | float | complex | str | bytes, +] + +# `ArrayLike_co`: array-like objects that can be coerced into `X` +# given the casting rules `same_kind` +_ArrayLikeBool_co: TypeAlias = _DualArrayLike[ + dtype[np.bool], + bool, +] +_ArrayLikeUInt_co: TypeAlias = _DualArrayLike[ + dtype[np.bool] | dtype[unsignedinteger[Any]], + bool, +] +_ArrayLikeInt_co: TypeAlias = _DualArrayLike[ + dtype[np.bool] | dtype[integer[Any]], + bool | int, +] +_ArrayLikeFloat_co: TypeAlias = _DualArrayLike[ + dtype[np.bool] | dtype[integer[Any]] | dtype[floating[Any]], + bool | int | float, +] +_ArrayLikeComplex_co: TypeAlias = _DualArrayLike[ + ( + dtype[np.bool] + | dtype[integer[Any]] + | dtype[floating[Any]] + | dtype[complexfloating[Any, Any]] + ), + bool | int | float | complex, +] +_ArrayLikeNumber_co: TypeAlias = _DualArrayLike[ + dtype[np.bool] | dtype[number[Any]], + bool | int | float | complex, +] +_ArrayLikeTD64_co: TypeAlias = _DualArrayLike[ + dtype[np.bool] | dtype[integer[Any]] | dtype[timedelta64], + bool | int, +] +_ArrayLikeDT64_co: TypeAlias = ( + _SupportsArray[dtype[datetime64]] + | _NestedSequence[_SupportsArray[dtype[datetime64]]] +) +_ArrayLikeObject_co: TypeAlias = ( + _SupportsArray[dtype[object_]] + | _NestedSequence[_SupportsArray[dtype[object_]]] +) + +_ArrayLikeVoid_co: TypeAlias = ( + _SupportsArray[dtype[void]] + | _NestedSequence[_SupportsArray[dtype[void]]] +) +_ArrayLikeStr_co: TypeAlias = _DualArrayLike[ + dtype[str_], + str, +] +_ArrayLikeBytes_co: TypeAlias = _DualArrayLike[ + dtype[bytes_], + bytes, +] +_ArrayLikeString_co: TypeAlias = _DualArrayLike[ + StringDType, + str +] +_ArrayLikeAnyString_co: TypeAlias = ( + _ArrayLikeStr_co | + _ArrayLikeBytes_co | + _ArrayLikeString_co +) + +__Float64_co: TypeAlias = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool +__Complex128_co: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool 
+_ArrayLikeFloat64_co: TypeAlias = _DualArrayLike[dtype[__Float64_co], float | int] +_ArrayLikeComplex128_co: TypeAlias = _DualArrayLike[dtype[__Complex128_co], complex | float | int] + +# NOTE: This includes `builtins.bool`, but not `numpy.bool`. +_ArrayLikeInt: TypeAlias = _DualArrayLike[ + dtype[integer[Any]], + int, +] + +# Extra ArrayLike type so that pyright can deal with NDArray[Any] +# Used as the first overload, should only match NDArray[Any], +# not any actual types. +# https://github.com/numpy/numpy/pull/22193 +if sys.version_info >= (3, 11): + from typing import Never as _UnknownType +else: + from typing import NoReturn as _UnknownType + + +_ArrayLikeUnknown: TypeAlias = _DualArrayLike[ + dtype[_UnknownType], + _UnknownType, +] diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/_callable.pyi b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_callable.pyi new file mode 100644 index 0000000000000000000000000000000000000000..dee99ba1c94bba5dc1eb65ee7e348bc78b5398b0 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_callable.pyi @@ -0,0 +1,365 @@ +""" +A module with various ``typing.Protocol`` subclasses that implement +the ``__call__`` magic method. + +See the `Mypy documentation`_ on protocols for more details. + +.. _`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols + +""" + +from typing import ( + TypeAlias, + TypeVar, + final, + overload, + Any, + NoReturn, + Protocol, + type_check_only, +) + +import numpy as np +from numpy import ( + generic, + number, + integer, + unsignedinteger, + signedinteger, + int8, + int_, + floating, + float64, + complexfloating, + complex128, +) +from ._nbit import _NBitInt +from ._scalars import ( + _BoolLike_co, + _IntLike_co, + _NumberLike_co, +) +from . 
import NBitBase +from ._array_like import NDArray +from ._nested_sequence import _NestedSequence + +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") +_T1_contra = TypeVar("_T1_contra", contravariant=True) +_T2_contra = TypeVar("_T2_contra", contravariant=True) + +_2Tuple: TypeAlias = tuple[_T1, _T1] + +_NBit1 = TypeVar("_NBit1", bound=NBitBase) +_NBit2 = TypeVar("_NBit2", bound=NBitBase) + +_IntType = TypeVar("_IntType", bound=integer[Any]) +_FloatType = TypeVar("_FloatType", bound=floating[Any]) +_NumberType = TypeVar("_NumberType", bound=number[Any]) +_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number[Any]) +_GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic) + +@type_check_only +class _BoolOp(Protocol[_GenericType_co]): + @overload + def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... + @overload # platform dependent + def __call__(self, other: int, /) -> int_: ... + @overload + def __call__(self, other: float, /) -> float64: ... + @overload + def __call__(self, other: complex, /) -> complex128: ... + @overload + def __call__(self, other: _NumberType, /) -> _NumberType: ... + +@type_check_only +class _BoolBitOp(Protocol[_GenericType_co]): + @overload + def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... + @overload # platform dependent + def __call__(self, other: int, /) -> int_: ... + @overload + def __call__(self, other: _IntType, /) -> _IntType: ... + +@type_check_only +class _BoolSub(Protocol): + # Note that `other: bool` is absent here + @overload + def __call__(self, other: bool, /) -> NoReturn: ... + @overload # platform dependent + def __call__(self, other: int, /) -> int_: ... + @overload + def __call__(self, other: float, /) -> float64: ... + @overload + def __call__(self, other: complex, /) -> complex128: ... + @overload + def __call__(self, other: _NumberType, /) -> _NumberType: ... 
+ +@type_check_only +class _BoolTrueDiv(Protocol): + @overload + def __call__(self, other: float | _IntLike_co, /) -> float64: ... + @overload + def __call__(self, other: complex, /) -> complex128: ... + @overload + def __call__(self, other: _NumberType, /) -> _NumberType: ... + +@type_check_only +class _BoolMod(Protocol): + @overload + def __call__(self, other: _BoolLike_co, /) -> int8: ... + @overload # platform dependent + def __call__(self, other: int, /) -> int_: ... + @overload + def __call__(self, other: float, /) -> float64: ... + @overload + def __call__(self, other: _IntType, /) -> _IntType: ... + @overload + def __call__(self, other: _FloatType, /) -> _FloatType: ... + +@type_check_only +class _BoolDivMod(Protocol): + @overload + def __call__(self, other: _BoolLike_co, /) -> _2Tuple[int8]: ... + @overload # platform dependent + def __call__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __call__(self, other: float, /) -> _2Tuple[np.float64]: ... + @overload + def __call__(self, other: _IntType, /) -> _2Tuple[_IntType]: ... + @overload + def __call__(self, other: _FloatType, /) -> _2Tuple[_FloatType]: ... + +@type_check_only +class _IntTrueDiv(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> floating[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> floating[_NBit1] | floating[_NBitInt]: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... + @overload + def __call__( + self, other: complex, / + ) -> complexfloating[_NBit1, _NBit1] | complex128: ... + @overload + def __call__( + self, other: integer[_NBit2], / + ) -> floating[_NBit1] | floating[_NBit2]: ... + +@type_check_only +class _UnsignedIntOp(Protocol[_NBit1]): + # NOTE: `uint64 + signedinteger -> float64` + @overload + def __call__(self, other: int, /) -> unsignedinteger[_NBit1]: ... + @overload + def __call__(self, other: float, /) -> float64: ... 
+ @overload + def __call__(self, other: complex, /) -> complex128: ... + @overload + def __call__(self, other: unsignedinteger[_NBit2], /) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... + @overload + def __call__(self, other: signedinteger, /) -> Any: ... + +@type_check_only +class _UnsignedIntBitOp(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> signedinteger[Any]: ... + @overload + def __call__(self, other: signedinteger[Any], /) -> signedinteger[Any]: ... + @overload + def __call__( + self, other: unsignedinteger[_NBit2], / + ) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... + +@type_check_only +class _UnsignedIntMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... + @overload + def __call__(self, other: int | signedinteger[Any], /) -> Any: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... + @overload + def __call__( + self, other: unsignedinteger[_NBit2], / + ) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... + +@type_check_only +class _UnsignedIntDivMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... + @overload + def __call__(self, other: int | signedinteger[Any], /) -> _2Tuple[Any]: ... + @overload + def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... + @overload + def __call__( + self, other: unsignedinteger[_NBit2], / + ) -> _2Tuple[unsignedinteger[_NBit1]] | _2Tuple[unsignedinteger[_NBit2]]: ... + +@type_check_only +class _SignedIntOp(Protocol[_NBit1]): + @overload + def __call__(self, other: int, /) -> signedinteger[_NBit1]: ... + @overload + def __call__(self, other: float, /) -> float64: ... + @overload + def __call__(self, other: complex, /) -> complex128: ... 
+ @overload + def __call__(self, other: signedinteger[_NBit2], /) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... + +@type_check_only +class _SignedIntBitOp(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> signedinteger[_NBit1] | int_: ... + @overload + def __call__( + self, other: signedinteger[_NBit2], / + ) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... + +@type_check_only +class _SignedIntMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> signedinteger[_NBit1] | int_: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... + @overload + def __call__( + self, other: signedinteger[_NBit2], / + ) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... + +@type_check_only +class _SignedIntDivMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... + @overload + def __call__(self, other: int, /) -> _2Tuple[signedinteger[_NBit1]] | _2Tuple[int_]: ... + @overload + def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... + @overload + def __call__( + self, other: signedinteger[_NBit2], / + ) -> _2Tuple[signedinteger[_NBit1]] | _2Tuple[signedinteger[_NBit2]]: ... + +@type_check_only +class _FloatOp(Protocol[_NBit1]): + @overload + def __call__(self, other: int, /) -> floating[_NBit1]: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... + @overload + def __call__( + self, other: complex, / + ) -> complexfloating[_NBit1, _NBit1] | complex128: ... + @overload + def __call__( + self, other: integer[_NBit2] | floating[_NBit2], / + ) -> floating[_NBit1] | floating[_NBit2]: ... + +@type_check_only +class _FloatMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> floating[_NBit1]: ... 
+ @overload + def __call__(self, other: int, /) -> floating[_NBit1] | floating[_NBitInt]: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... + @overload + def __call__( + self, other: integer[_NBit2] | floating[_NBit2], / + ) -> floating[_NBit1] | floating[_NBit2]: ... + +class _FloatDivMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> _2Tuple[floating[_NBit1]]: ... + @overload + def __call__( + self, other: int, / + ) -> _2Tuple[floating[_NBit1]] | _2Tuple[floating[_NBitInt]]: ... + @overload + def __call__( + self, other: float, / + ) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... + @overload + def __call__( + self, other: integer[_NBit2] | floating[_NBit2], / + ) -> _2Tuple[floating[_NBit1]] | _2Tuple[floating[_NBit2]]: ... + +@type_check_only +class _NumberOp(Protocol): + def __call__(self, other: _NumberLike_co, /) -> Any: ... + +@final +@type_check_only +class _SupportsLT(Protocol): + def __lt__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsLE(Protocol): + def __le__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsGT(Protocol): + def __gt__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsGE(Protocol): + def __ge__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _ComparisonOpLT(Protocol[_T1_contra, _T2_contra]): + @overload + def __call__(self, other: _T1_contra, /) -> np.bool: ... + @overload + def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _NestedSequence[_SupportsGT], /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _SupportsGT, /) -> np.bool: ... + +@final +@type_check_only +class _ComparisonOpLE(Protocol[_T1_contra, _T2_contra]): + @overload + def __call__(self, other: _T1_contra, /) -> np.bool: ... + @overload + def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... 
+ @overload + def __call__(self, other: _NestedSequence[_SupportsGE], /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _SupportsGE, /) -> np.bool: ... + +@final +@type_check_only +class _ComparisonOpGT(Protocol[_T1_contra, _T2_contra]): + @overload + def __call__(self, other: _T1_contra, /) -> np.bool: ... + @overload + def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _NestedSequence[_SupportsLT], /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _SupportsLT, /) -> np.bool: ... + +@final +@type_check_only +class _ComparisonOpGE(Protocol[_T1_contra, _T2_contra]): + @overload + def __call__(self, other: _T1_contra, /) -> np.bool: ... + @overload + def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _NestedSequence[_SupportsGT], /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _SupportsGT, /) -> np.bool: ... diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/_char_codes.py b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_char_codes.py new file mode 100644 index 0000000000000000000000000000000000000000..3560527efbeea24d2909aa38eeac3d1c23f258d9 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_char_codes.py @@ -0,0 +1,214 @@ +from typing import Literal + +_BoolCodes = Literal[ + "bool", "bool_", + "?", "|?", "=?", "?", + "b1", "|b1", "=b1", "b1", +] # fmt: skip + +_UInt8Codes = Literal["uint8", "u1", "|u1", "=u1", "u1"] +_UInt16Codes = Literal["uint16", "u2", "|u2", "=u2", "u2"] +_UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "u4"] +_UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "u8"] + +_Int8Codes = Literal["int8", "i1", "|i1", "=i1", "i1"] +_Int16Codes = Literal["int16", "i2", "|i2", "=i2", "i2"] +_Int32Codes = Literal["int32", "i4", "|i4", "=i4", "i4"] +_Int64Codes = Literal["int64", "i8", "|i8", "=i8", "i8"] + +_Float16Codes = Literal["float16", 
"f2", "|f2", "=f2", "f2"] +_Float32Codes = Literal["float32", "f4", "|f4", "=f4", "f4"] +_Float64Codes = Literal["float64", "f8", "|f8", "=f8", "f8"] + +_Complex64Codes = Literal["complex64", "c8", "|c8", "=c8", "c8"] +_Complex128Codes = Literal["complex128", "c16", "|c16", "=c16", "c16"] + +_ByteCodes = Literal["byte", "b", "|b", "=b", "b"] +_ShortCodes = Literal["short", "h", "|h", "=h", "h"] +_IntCCodes = Literal["intc", "i", "|i", "=i", "i"] +_IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "n"] +_LongCodes = Literal["long", "l", "|l", "=l", "l"] +_IntCodes = _IntPCodes +_LongLongCodes = Literal["longlong", "q", "|q", "=q", "q"] + +_UByteCodes = Literal["ubyte", "B", "|B", "=B", "B"] +_UShortCodes = Literal["ushort", "H", "|H", "=H", "H"] +_UIntCCodes = Literal["uintc", "I", "|I", "=I", "I"] +_UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "N"] +_ULongCodes = Literal["ulong", "L", "|L", "=L", "L"] +_UIntCodes = _UIntPCodes +_ULongLongCodes = Literal["ulonglong", "Q", "|Q", "=Q", "Q"] + +_HalfCodes = Literal["half", "e", "|e", "=e", "e"] +_SingleCodes = Literal["single", "f", "|f", "=f", "f"] +_DoubleCodes = Literal["double", "float", "d", "|d", "=d", "d"] +_LongDoubleCodes = Literal["longdouble", "g", "|g", "=g", "g"] + +_CSingleCodes = Literal["csingle", "F", "|F", "=F", "F"] +_CDoubleCodes = Literal["cdouble", "complex", "D", "|D", "=D", "D"] +_CLongDoubleCodes = Literal["clongdouble", "G", "|G", "=G", "G"] + +_StrCodes = Literal["str", "str_", "unicode", "U", "|U", "=U", "U"] +_BytesCodes = Literal["bytes", "bytes_", "S", "|S", "=S", "S"] +_VoidCodes = Literal["void", "V", "|V", "=V", "V"] +_ObjectCodes = Literal["object", "object_", "O", "|O", "=O", "O"] + +_DT64Codes = Literal[ + "datetime64", "|datetime64", "=datetime64", + "datetime64", + "datetime64[Y]", "|datetime64[Y]", "=datetime64[Y]", + "datetime64[Y]", + "datetime64[M]", "|datetime64[M]", "=datetime64[M]", + "datetime64[M]", + "datetime64[W]", "|datetime64[W]", 
"=datetime64[W]", + "datetime64[W]", + "datetime64[D]", "|datetime64[D]", "=datetime64[D]", + "datetime64[D]", + "datetime64[h]", "|datetime64[h]", "=datetime64[h]", + "datetime64[h]", + "datetime64[m]", "|datetime64[m]", "=datetime64[m]", + "datetime64[m]", + "datetime64[s]", "|datetime64[s]", "=datetime64[s]", + "datetime64[s]", + "datetime64[ms]", "|datetime64[ms]", "=datetime64[ms]", + "datetime64[ms]", + "datetime64[us]", "|datetime64[us]", "=datetime64[us]", + "datetime64[us]", + "datetime64[ns]", "|datetime64[ns]", "=datetime64[ns]", + "datetime64[ns]", + "datetime64[ps]", "|datetime64[ps]", "=datetime64[ps]", + "datetime64[ps]", + "datetime64[fs]", "|datetime64[fs]", "=datetime64[fs]", + "datetime64[fs]", + "datetime64[as]", "|datetime64[as]", "=datetime64[as]", + "datetime64[as]", + "M", "|M", "=M", "M", + "M8", "|M8", "=M8", "M8", + "M8[Y]", "|M8[Y]", "=M8[Y]", "M8[Y]", + "M8[M]", "|M8[M]", "=M8[M]", "M8[M]", + "M8[W]", "|M8[W]", "=M8[W]", "M8[W]", + "M8[D]", "|M8[D]", "=M8[D]", "M8[D]", + "M8[h]", "|M8[h]", "=M8[h]", "M8[h]", + "M8[m]", "|M8[m]", "=M8[m]", "M8[m]", + "M8[s]", "|M8[s]", "=M8[s]", "M8[s]", + "M8[ms]", "|M8[ms]", "=M8[ms]", "M8[ms]", + "M8[us]", "|M8[us]", "=M8[us]", "M8[us]", + "M8[ns]", "|M8[ns]", "=M8[ns]", "M8[ns]", + "M8[ps]", "|M8[ps]", "=M8[ps]", "M8[ps]", + "M8[fs]", "|M8[fs]", "=M8[fs]", "M8[fs]", + "M8[as]", "|M8[as]", "=M8[as]", "M8[as]", +] +_TD64Codes = Literal[ + "timedelta64", "|timedelta64", "=timedelta64", + "timedelta64", + "timedelta64[Y]", "|timedelta64[Y]", "=timedelta64[Y]", + "timedelta64[Y]", + "timedelta64[M]", "|timedelta64[M]", "=timedelta64[M]", + "timedelta64[M]", + "timedelta64[W]", "|timedelta64[W]", "=timedelta64[W]", + "timedelta64[W]", + "timedelta64[D]", "|timedelta64[D]", "=timedelta64[D]", + "timedelta64[D]", + "timedelta64[h]", "|timedelta64[h]", "=timedelta64[h]", + "timedelta64[h]", + "timedelta64[m]", "|timedelta64[m]", "=timedelta64[m]", + "timedelta64[m]", + "timedelta64[s]", "|timedelta64[s]", 
"=timedelta64[s]", + "timedelta64[s]", + "timedelta64[ms]", "|timedelta64[ms]", "=timedelta64[ms]", + "timedelta64[ms]", + "timedelta64[us]", "|timedelta64[us]", "=timedelta64[us]", + "timedelta64[us]", + "timedelta64[ns]", "|timedelta64[ns]", "=timedelta64[ns]", + "timedelta64[ns]", + "timedelta64[ps]", "|timedelta64[ps]", "=timedelta64[ps]", + "timedelta64[ps]", + "timedelta64[fs]", "|timedelta64[fs]", "=timedelta64[fs]", + "timedelta64[fs]", + "timedelta64[as]", "|timedelta64[as]", "=timedelta64[as]", + "timedelta64[as]", + "m", "|m", "=m", "m", + "m8", "|m8", "=m8", "m8", + "m8[Y]", "|m8[Y]", "=m8[Y]", "m8[Y]", + "m8[M]", "|m8[M]", "=m8[M]", "m8[M]", + "m8[W]", "|m8[W]", "=m8[W]", "m8[W]", + "m8[D]", "|m8[D]", "=m8[D]", "m8[D]", + "m8[h]", "|m8[h]", "=m8[h]", "m8[h]", + "m8[m]", "|m8[m]", "=m8[m]", "m8[m]", + "m8[s]", "|m8[s]", "=m8[s]", "m8[s]", + "m8[ms]", "|m8[ms]", "=m8[ms]", "m8[ms]", + "m8[us]", "|m8[us]", "=m8[us]", "m8[us]", + "m8[ns]", "|m8[ns]", "=m8[ns]", "m8[ns]", + "m8[ps]", "|m8[ps]", "=m8[ps]", "m8[ps]", + "m8[fs]", "|m8[fs]", "=m8[fs]", "m8[fs]", + "m8[as]", "|m8[as]", "=m8[as]", "m8[as]", +] + +# NOTE: `StringDType' has no scalar type, and therefore has no name that can +# be passed to the `dtype` constructor +_StringCodes = Literal["T", "|T", "=T", "T"] + +# NOTE: Nested literals get flattened and de-duplicated at runtime, which isn't +# the case for a `Union` of `Literal`s. +# So even though they're equivalent when type-checking, they differ at runtime. +# Another advantage of nesting, is that they always have a "flat" +# `Literal.__args__`, which is a tuple of *literally* all its literal values. 
+ +_UnsignedIntegerCodes = Literal[ + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _ULongCodes, + _ULongLongCodes, +] +_SignedIntegerCodes = Literal[ + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntCodes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _LongCodes, + _LongLongCodes, +] +_FloatingCodes = Literal[ + _Float16Codes, + _Float32Codes, + _Float64Codes, + _LongDoubleCodes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes +] +_ComplexFloatingCodes = Literal[ + _Complex64Codes, + _Complex128Codes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, +] +_IntegerCodes = Literal[_UnsignedIntegerCodes, _SignedIntegerCodes] +_InexactCodes = Literal[_FloatingCodes, _ComplexFloatingCodes] +_NumberCodes = Literal[_IntegerCodes, _InexactCodes] + +_CharacterCodes = Literal[_StrCodes, _BytesCodes] +_FlexibleCodes = Literal[_VoidCodes, _CharacterCodes] + +_GenericCodes = Literal[ + _BoolCodes, + _NumberCodes, + _FlexibleCodes, + _DT64Codes, + _TD64Codes, + _ObjectCodes, + # TODO: add `_StringCodes` once it has a scalar type + # _StringCodes, +] diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/_dtype_like.py b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_dtype_like.py new file mode 100644 index 0000000000000000000000000000000000000000..52753cbb227630af2dd42801fb48b003cbe21b0d --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_dtype_like.py @@ -0,0 +1,249 @@ +from collections.abc import Sequence # noqa: F811 +from typing import ( + Any, + TypeAlias, + TypeVar, + Protocol, + TypedDict, + runtime_checkable, +) + +import numpy as np + +from ._shape import _ShapeLike + +from ._char_codes import ( + _BoolCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Complex64Codes, + _Complex128Codes, + 
_ByteCodes, + _ShortCodes, + _IntCCodes, + _LongCodes, + _LongLongCodes, + _IntPCodes, + _IntCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _ULongCodes, + _ULongLongCodes, + _UIntPCodes, + _UIntCodes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, + _DT64Codes, + _TD64Codes, + _StrCodes, + _BytesCodes, + _VoidCodes, + _ObjectCodes, +) + +_SCT = TypeVar("_SCT", bound=np.generic) +_DType_co = TypeVar("_DType_co", covariant=True, bound=np.dtype[Any]) + +_DTypeLikeNested: TypeAlias = Any # TODO: wait for support for recursive types + + +# Mandatory keys +class _DTypeDictBase(TypedDict): + names: Sequence[str] + formats: Sequence[_DTypeLikeNested] + + +# Mandatory + optional keys +class _DTypeDict(_DTypeDictBase, total=False): + # Only `str` elements are usable as indexing aliases, + # but `titles` can in principle accept any object + offsets: Sequence[int] + titles: Sequence[Any] + itemsize: int + aligned: bool + + +# A protocol for anything with the dtype attribute +@runtime_checkable +class _SupportsDType(Protocol[_DType_co]): + @property + def dtype(self) -> _DType_co: ... + + +# A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic` +_DTypeLike: TypeAlias = ( + np.dtype[_SCT] + | type[_SCT] + | _SupportsDType[np.dtype[_SCT]] +) + + +# Would create a dtype[np.void] +_VoidDTypeLike: TypeAlias = ( + # (flexible_dtype, itemsize) + tuple[_DTypeLikeNested, int] + # (fixed_dtype, shape) + | tuple[_DTypeLikeNested, _ShapeLike] + # [(field_name, field_dtype, field_shape), ...] + # + # The type here is quite broad because NumPy accepts quite a wide + # range of inputs inside the list; see the tests for some + # examples. + | list[Any] + # {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ..., + # 'itemsize': ...} + | _DTypeDict + # (base_dtype, new_dtype) + | tuple[_DTypeLikeNested, _DTypeLikeNested] +) + +# Anything that can be coerced into numpy.dtype. 
+# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html +DTypeLike: TypeAlias = ( + np.dtype[Any] + # default data type (float64) + | None + # array-scalar types and generic types + | type[Any] # NOTE: We're stuck with `type[Any]` due to object dtypes + # anything with a dtype attribute + | _SupportsDType[np.dtype[Any]] + # character codes, type strings or comma-separated fields, e.g., 'float64' + | str + | _VoidDTypeLike +) + +# NOTE: while it is possible to provide the dtype as a dict of +# dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`), +# this syntax is officially discouraged and +# therefore not included in the type-union defining `DTypeLike`. +# +# See https://github.com/numpy/numpy/issues/16891 for more details. + +# Aliases for commonly used dtype-like objects. +# Note that the precision of `np.number` subclasses is ignored herein. +_DTypeLikeBool: TypeAlias = ( + type[bool] + | type[np.bool] + | np.dtype[np.bool] + | _SupportsDType[np.dtype[np.bool]] + | _BoolCodes +) +_DTypeLikeUInt: TypeAlias = ( + type[np.unsignedinteger[Any]] + | np.dtype[np.unsignedinteger[Any]] + | _SupportsDType[np.dtype[np.unsignedinteger[Any]]] + | _UInt8Codes + | _UInt16Codes + | _UInt32Codes + | _UInt64Codes + | _UByteCodes + | _UShortCodes + | _UIntCCodes + | _LongCodes + | _ULongLongCodes + | _UIntPCodes + | _UIntCodes +) +_DTypeLikeInt: TypeAlias = ( + type[int] + | type[np.signedinteger[Any]] + | np.dtype[np.signedinteger[Any]] + | _SupportsDType[np.dtype[np.signedinteger[Any]]] + | _Int8Codes + | _Int16Codes + | _Int32Codes + | _Int64Codes + | _ByteCodes + | _ShortCodes + | _IntCCodes + | _LongCodes + | _LongLongCodes + | _IntPCodes + | _IntCodes +) +_DTypeLikeFloat: TypeAlias = ( + type[float] + | type[np.floating[Any]] + | np.dtype[np.floating[Any]] + | _SupportsDType[np.dtype[np.floating[Any]]] + | _Float16Codes + | _Float32Codes + | _Float64Codes + | _HalfCodes + | _SingleCodes + | _DoubleCodes + | _LongDoubleCodes +) 
+_DTypeLikeComplex: TypeAlias = ( + type[complex] + | type[np.complexfloating[Any]] + | np.dtype[np.complexfloating[Any]] + | _SupportsDType[np.dtype[np.complexfloating[Any]]] + | _Complex64Codes + | _Complex128Codes + | _CSingleCodes + | _CDoubleCodes + | _CLongDoubleCodes +) +_DTypeLikeDT64: TypeAlias = ( + type[np.timedelta64] + | np.dtype[np.timedelta64] + | _SupportsDType[np.dtype[np.timedelta64]] + | _TD64Codes +) +_DTypeLikeTD64: TypeAlias = ( + type[np.datetime64] + | np.dtype[np.datetime64] + | _SupportsDType[np.dtype[np.datetime64]] + | _DT64Codes +) +_DTypeLikeStr: TypeAlias = ( + type[str] + | type[np.str_] + | np.dtype[np.str_] + | _SupportsDType[np.dtype[np.str_]] + | _StrCodes +) +_DTypeLikeBytes: TypeAlias = ( + type[bytes] + | type[np.bytes_] + | np.dtype[np.bytes_] + | _SupportsDType[np.dtype[np.bytes_]] + | _BytesCodes +) +_DTypeLikeVoid: TypeAlias = ( + type[np.void] + | np.dtype[np.void] + | _SupportsDType[np.dtype[np.void]] + | _VoidCodes + | _VoidDTypeLike +) +_DTypeLikeObject: TypeAlias = ( + type + | np.dtype[np.object_] + | _SupportsDType[np.dtype[np.object_]] + | _ObjectCodes +) + +_DTypeLikeComplex_co: TypeAlias = ( + _DTypeLikeBool + | _DTypeLikeUInt + | _DTypeLikeInt + | _DTypeLikeFloat + | _DTypeLikeComplex +) diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/_extended_precision.py b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_extended_precision.py new file mode 100644 index 0000000000000000000000000000000000000000..2af48891bc0fd4fda7bbc34b01e1eb2d5e7146f6 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_extended_precision.py @@ -0,0 +1,27 @@ +"""A module with platform-specific extended precision +`numpy.number` subclasses. + +The subclasses are defined here (instead of ``__init__.pyi``) such +that they can be imported conditionally via the numpy's mypy plugin. +""" + +import numpy as np +from . 
import ( + _80Bit, + _96Bit, + _128Bit, + _256Bit, +) + +uint128 = np.unsignedinteger[_128Bit] +uint256 = np.unsignedinteger[_256Bit] +int128 = np.signedinteger[_128Bit] +int256 = np.signedinteger[_256Bit] +float80 = np.floating[_80Bit] +float96 = np.floating[_96Bit] +float128 = np.floating[_128Bit] +float256 = np.floating[_256Bit] +complex160 = np.complexfloating[_80Bit, _80Bit] +complex192 = np.complexfloating[_96Bit, _96Bit] +complex256 = np.complexfloating[_128Bit, _128Bit] +complex512 = np.complexfloating[_256Bit, _256Bit] diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/_nbit.py b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_nbit.py new file mode 100644 index 0000000000000000000000000000000000000000..cdba6a32f7e652ebd8e505fb3257fd017d3bb612 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_nbit.py @@ -0,0 +1,19 @@ +"""A module with the precisions of platform-specific `~numpy.number`s.""" + +from typing import TypeAlias +from ._nbit_base import _8Bit, _16Bit, _32Bit, _64Bit, _96Bit, _128Bit + + +# To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin +_NBitByte: TypeAlias = _8Bit +_NBitShort: TypeAlias = _16Bit +_NBitIntC: TypeAlias = _32Bit +_NBitIntP: TypeAlias = _32Bit | _64Bit +_NBitInt: TypeAlias = _NBitIntP +_NBitLong: TypeAlias = _32Bit | _64Bit +_NBitLongLong: TypeAlias = _64Bit + +_NBitHalf: TypeAlias = _16Bit +_NBitSingle: TypeAlias = _32Bit +_NBitDouble: TypeAlias = _64Bit +_NBitLongDouble: TypeAlias = _64Bit | _96Bit | _128Bit diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/_nbit_base.py b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_nbit_base.py new file mode 100644 index 0000000000000000000000000000000000000000..92218f2ed844228c00fb5de7736af28aa071b59b --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_nbit_base.py @@ -0,0 +1,100 @@ +"""A module with the precisions of generic `~numpy.number` types.""" +from .._utils import set_module +from 
typing import final + + +@final # Disallow the creation of arbitrary `NBitBase` subclasses +@set_module("numpy.typing") +class NBitBase: + """ + A type representing `numpy.number` precision during static type checking. + + Used exclusively for the purpose static type checking, `NBitBase` + represents the base of a hierarchical set of subclasses. + Each subsequent subclass is herein used for representing a lower level + of precision, *e.g.* ``64Bit > 32Bit > 16Bit``. + + .. versionadded:: 1.20 + + Examples + -------- + Below is a typical usage example: `NBitBase` is herein used for annotating + a function that takes a float and integer of arbitrary precision + as arguments and returns a new float of whichever precision is largest + (*e.g.* ``np.float16 + np.int64 -> np.float64``). + + .. code-block:: python + + >>> from __future__ import annotations + >>> from typing import TypeVar, TYPE_CHECKING + >>> import numpy as np + >>> import numpy.typing as npt + + >>> S = TypeVar("S", bound=npt.NBitBase) + >>> T = TypeVar("T", bound=npt.NBitBase) + + >>> def add(a: np.floating[S], b: np.integer[T]) -> np.floating[S | T]: + ... return a + b + + >>> a = np.float16() + >>> b = np.int64() + >>> out = add(a, b) + + >>> if TYPE_CHECKING: + ... reveal_locals() + ... # note: Revealed local types are: + ... # note: a: numpy.floating[numpy.typing._16Bit*] + ... # note: b: numpy.signedinteger[numpy.typing._64Bit*] + ... 
# note: out: numpy.floating[numpy.typing._64Bit*] + + """ + + def __init_subclass__(cls) -> None: + allowed_names = { + "NBitBase", "_256Bit", "_128Bit", "_96Bit", "_80Bit", + "_64Bit", "_32Bit", "_16Bit", "_8Bit", + } + if cls.__name__ not in allowed_names: + raise TypeError('cannot inherit from final class "NBitBase"') + super().__init_subclass__() + +@final +@set_module("numpy._typing") +# Silence errors about subclassing a `@final`-decorated class +class _256Bit(NBitBase): # type: ignore[misc] + pass + +@final +@set_module("numpy._typing") +class _128Bit(_256Bit): # type: ignore[misc] + pass + +@final +@set_module("numpy._typing") +class _96Bit(_128Bit): # type: ignore[misc] + pass + +@final +@set_module("numpy._typing") +class _80Bit(_96Bit): # type: ignore[misc] + pass + +@final +@set_module("numpy._typing") +class _64Bit(_80Bit): # type: ignore[misc] + pass + +@final +@set_module("numpy._typing") +class _32Bit(_64Bit): # type: ignore[misc] + pass + +@final +@set_module("numpy._typing") +class _16Bit(_32Bit): # type: ignore[misc] + pass + +@final +@set_module("numpy._typing") +class _8Bit(_16Bit): # type: ignore[misc] + pass diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/_nested_sequence.py b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_nested_sequence.py new file mode 100644 index 0000000000000000000000000000000000000000..b6fb08d1c0057c07b15839d73b4f4402fccb3921 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_nested_sequence.py @@ -0,0 +1,89 @@ +"""A module containing the `_NestedSequence` protocol.""" + +from __future__ import annotations + +from typing import ( + Any, + TypeVar, + Protocol, + runtime_checkable, + TYPE_CHECKING, +) + +if TYPE_CHECKING: + from collections.abc import Iterator + +__all__ = ["_NestedSequence"] + +_T_co = TypeVar("_T_co", covariant=True) + + +@runtime_checkable +class _NestedSequence(Protocol[_T_co]): + """A protocol for representing nested sequences. 
+ + Warning + ------- + `_NestedSequence` currently does not work in combination with typevars, + *e.g.* ``def func(a: _NestedSequnce[T]) -> T: ...``. + + See Also + -------- + collections.abc.Sequence + ABCs for read-only and mutable :term:`sequences`. + + Examples + -------- + .. code-block:: python + + >>> from __future__ import annotations + + >>> from typing import TYPE_CHECKING + >>> import numpy as np + >>> from numpy._typing import _NestedSequence + + >>> def get_dtype(seq: _NestedSequence[float]) -> np.dtype[np.float64]: + ... return np.asarray(seq).dtype + + >>> a = get_dtype([1.0]) + >>> b = get_dtype([[1.0]]) + >>> c = get_dtype([[[1.0]]]) + >>> d = get_dtype([[[[1.0]]]]) + + >>> if TYPE_CHECKING: + ... reveal_locals() + ... # note: Revealed local types are: + ... # note: a: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + ... # note: b: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + ... # note: c: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + ... # note: d: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + + """ + + def __len__(self, /) -> int: + """Implement ``len(self)``.""" + raise NotImplementedError + + def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]: + """Implement ``self[x]``.""" + raise NotImplementedError + + def __contains__(self, x: object, /) -> bool: + """Implement ``x in self``.""" + raise NotImplementedError + + def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: + """Implement ``iter(self)``.""" + raise NotImplementedError + + def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: + """Implement ``reversed(self)``.""" + raise NotImplementedError + + def count(self, value: Any, /) -> int: + """Return the number of occurrences of `value`.""" + raise NotImplementedError + + def index(self, value: Any, /) -> int: + """Return the first index of `value`.""" + raise NotImplementedError diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/_scalars.py 
b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_scalars.py new file mode 100644 index 0000000000000000000000000000000000000000..fa9fab33198ad7718dba27f758218e0259df3234 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_scalars.py @@ -0,0 +1,27 @@ +from typing import Any, TypeAlias + +import numpy as np + +# NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and +# `np.bytes_` are already subclasses of their builtin counterpart + +_CharLike_co: TypeAlias = str | bytes + +# The 6 `Like_co` type-aliases below represent all scalars that can be +# coerced into `` (with the casting rule `same_kind`) +_BoolLike_co: TypeAlias = bool | np.bool +_UIntLike_co: TypeAlias = np.unsignedinteger[Any] | _BoolLike_co +_IntLike_co: TypeAlias = int | np.integer[Any] | _BoolLike_co +_FloatLike_co: TypeAlias = float | np.floating[Any] | _IntLike_co +_ComplexLike_co: TypeAlias = ( + complex + | np.complexfloating[Any, Any] + | _FloatLike_co +) +_TD64Like_co: TypeAlias = np.timedelta64 | _IntLike_co + +_NumberLike_co: TypeAlias = int | float | complex | np.number[Any] | np.bool +_ScalarLike_co: TypeAlias = int | float | complex | str | bytes | np.generic + +# `_VoidLike_co` is technically not a scalar, but it's close enough +_VoidLike_co: TypeAlias = tuple[Any, ...] | np.void diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/_shape.py b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_shape.py new file mode 100644 index 0000000000000000000000000000000000000000..e297e3480c5952bb46418b1b123cabe15a0350f6 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_shape.py @@ -0,0 +1,7 @@ +from collections.abc import Sequence +from typing import SupportsIndex, TypeAlias + +_Shape: TypeAlias = tuple[int, ...] 
+ +# Anything that can be coerced to a shape tuple +_ShapeLike: TypeAlias = SupportsIndex | Sequence[SupportsIndex] diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/_ufunc.py b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_ufunc.py new file mode 100644 index 0000000000000000000000000000000000000000..835b099c49b7be4e131467f79b618c5539e3ec44 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_ufunc.py @@ -0,0 +1,7 @@ +from .. import ufunc + +_UFunc_Nin1_Nout1 = ufunc +_UFunc_Nin2_Nout1 = ufunc +_UFunc_Nin1_Nout2 = ufunc +_UFunc_Nin2_Nout2 = ufunc +_GUFunc_Nin2_Nout1 = ufunc diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_typing/_ufunc.pyi b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_ufunc.pyi new file mode 100644 index 0000000000000000000000000000000000000000..3d1ae5837c68ee4675ab78c1fdd73143cc9b24ac --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_typing/_ufunc.pyi @@ -0,0 +1,942 @@ +"""A module with private type-check-only `numpy.ufunc` subclasses. + +The signatures of the ufuncs are too varied to reasonably type +with a single class. So instead, `ufunc` has been expanded into +four private subclasses, one for each combination of +`~ufunc.nin` and `~ufunc.nout`. 
+""" + +from typing import ( + Any, + Generic, + Literal, + NoReturn, + Protocol, + SupportsIndex, + TypeAlias, + TypedDict, + TypeVar, + overload, + type_check_only, +) + +from typing_extensions import LiteralString, Unpack + +import numpy as np +from numpy import _CastingKind, _OrderKACF, ufunc +from numpy.typing import NDArray + +from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co +from ._dtype_like import DTypeLike +from ._scalars import _ScalarLike_co +from ._shape import _ShapeLike + +_T = TypeVar("_T") +_2Tuple: TypeAlias = tuple[_T, _T] +_3Tuple: TypeAlias = tuple[_T, _T, _T] +_4Tuple: TypeAlias = tuple[_T, _T, _T, _T] + +_2PTuple: TypeAlias = tuple[_T, _T, Unpack[tuple[_T, ...]]] +_3PTuple: TypeAlias = tuple[_T, _T, _T, Unpack[tuple[_T, ...]]] +_4PTuple: TypeAlias = tuple[_T, _T, _T, _T, Unpack[tuple[_T, ...]]] + +_NTypes = TypeVar("_NTypes", bound=int, covariant=True) +_IDType = TypeVar("_IDType", covariant=True) +_NameType = TypeVar("_NameType", bound=LiteralString, covariant=True) +_Signature = TypeVar("_Signature", bound=LiteralString, covariant=True) + +_NIn = TypeVar("_NIn", bound=int, covariant=True) +_NOut = TypeVar("_NOut", bound=int, covariant=True) +_ReturnType_co = TypeVar("_ReturnType_co", covariant=True) +_ArrayType = TypeVar("_ArrayType", bound=np.ndarray[Any, Any]) + + +@type_check_only +class _SupportsArrayUFunc(Protocol): + def __array_ufunc__( + self, + ufunc: ufunc, + method: Literal["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + *inputs: Any, + **kwargs: Any, + ) -> Any: ... + +@type_check_only +class _UFunc3Kwargs(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + subok: bool + signature: _3Tuple[str | None] | str | None + +# NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for +# ufuncs that don't accept two input arguments and return one output argument. 
+# In such cases the respective methods return `NoReturn` + +# NOTE: Similarly, `at` won't be defined for ufuncs that return +# multiple outputs; in such cases `at` is typed to return `NoReturn` + +# NOTE: If 2 output types are returned then `out` must be a +# 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable + +# pyright: reportIncompatibleMethodOverride=false + +@type_check_only +class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[2]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + out: None = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _2Tuple[None | str] = ..., + ) -> Any: ... + @overload + def __call__( + self, + __x1: ArrayLike, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _2Tuple[None | str] = ..., + ) -> NDArray[Any]: ... + @overload + def __call__( + self, + __x1: _SupportsArrayUFunc, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _2Tuple[None | str] = ..., + ) -> Any: ... + + def at( + self, + a: _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + /, + ) -> None: ... 
+ + def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... + +@type_check_only +class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> None: ... + + @overload # (scalar, scalar) -> scalar + def __call__( + self, + x1: _ScalarLike_co, + x2: _ScalarLike_co, + /, + out: None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> Any: ... + @overload # (array-like, array) -> array + def __call__( + self, + x1: ArrayLike, + x2: NDArray[np.generic], + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array, array-like) -> array + def __call__( + self, + x1: NDArray[np.generic], + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array-like, array-like, out=array) -> array + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]], + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... 
+ @overload # (array-like, array-like) -> array | scalar + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any] | Any: ... + + def at( + self, + a: NDArray[Any], + indices: _ArrayLikeInt_co, + b: ArrayLike, + /, + ) -> None: ... + + def reduce( + self, + array: ArrayLike, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None | NDArray[Any] = ..., + keepdims: bool = ..., + initial: Any = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + + def accumulate( + self, + array: ArrayLike, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None | NDArray[Any] = ..., + ) -> NDArray[Any]: ... + + def reduceat( + self, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None | NDArray[Any] = ..., + ) -> NDArray[Any]: ... + + @overload # (scalar, scalar) -> scalar + def outer( + self, + A: _ScalarLike_co, + B: _ScalarLike_co, + /, + *, + out: None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> Any: ... + @overload # (array-like, array) -> array + def outer( + self, + A: ArrayLike, + B: NDArray[np.generic], + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array, array-like) -> array + def outer( + self, + A: NDArray[np.generic], + B: ArrayLike, + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... 
+ @overload # (array-like, array-like, out=array) -> array + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]], + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array-like, array-like) -> array | scalar + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any] | Any: ... + +@type_check_only +class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[2]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + __out1: None = ..., + __out2: None = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + ) -> _2Tuple[Any]: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __out1: None | NDArray[Any] = ..., + __out2: None | NDArray[Any] = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + ) -> _2Tuple[NDArray[Any]]: ... 
+ @overload + def __call__( + self, + __x1: _SupportsArrayUFunc, + __out1: None | NDArray[Any] = ..., + __out2: None | NDArray[Any] = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + ) -> _2Tuple[Any]: ... + + def at(self, *args, **kwargs) -> NoReturn: ... + def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... + +@type_check_only +class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[2]: ... + @property + def nargs(self) -> Literal[4]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + __x2: _ScalarLike_co, + __out1: None = ..., + __out2: None = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _4Tuple[None | str] = ..., + ) -> _2Tuple[Any]: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + __out1: None | NDArray[Any] = ..., + __out2: None | NDArray[Any] = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _4Tuple[None | str] = ..., + ) -> _2Tuple[NDArray[Any]]: ... + + def at(self, *args, **kwargs) -> NoReturn: ... 
+ def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... + +@type_check_only +class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> _Signature: ... + + # Scalar for 1D array-likes; ndarray otherwise + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + out: None = ..., + *, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + axes: list[_2Tuple[SupportsIndex]] = ..., + ) -> Any: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + out: NDArray[Any] | tuple[NDArray[Any]], + *, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + axes: list[_2Tuple[SupportsIndex]] = ..., + ) -> NDArray[Any]: ... + + def at(self, *args, **kwargs) -> NoReturn: ... + def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... 
+ +@type_check_only +class _PyFunc_Kwargs_Nargs2(TypedDict, total=False): + where: None | _ArrayLikeBool_co + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | tuple[DTypeLike, DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs3(TypedDict, total=False): + where: None | _ArrayLikeBool_co + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | tuple[DTypeLike, DTypeLike, DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs3P(TypedDict, total=False): + where: None | _ArrayLikeBool_co + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | _3PTuple[DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs4P(TypedDict, total=False): + where: None | _ArrayLikeBool_co + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | _4PTuple[DTypeLike] + +@type_check_only +class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[2]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + /, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> _ReturnType_co: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + out: _ArrayType | tuple[_ArrayType], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> _ArrayType: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc, + /, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> Any: ... 
+ + def at(self, a: _SupportsArrayUFunc, ixs: _ArrayLikeInt_co, /) -> None: ... + def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + +@type_check_only +class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + x2: _ScalarLike_co, + /, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: _ArrayType | tuple[_ArrayType], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ArrayType: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc, + x2: _SupportsArrayUFunc | ArrayLike, + /, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Any: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: _SupportsArrayUFunc, + /, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Any: ... + + def at(self, a: _SupportsArrayUFunc, ixs: _ArrayLikeInt_co, b: ArrayLike, /) -> None: ... 
+ + @overload + def reduce( + self, + array: ArrayLike, + axis: None | _ShapeLike, + dtype: DTypeLike, + out: _ArrayType, + /, + keepdims: bool = ..., + initial: _ScalarLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _ArrayType: ... + @overload + def reduce( + self, + /, + array: ArrayLike, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayType | tuple[_ArrayType], + keepdims: bool = ..., + initial: _ScalarLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _ArrayType: ... + @overload + def reduce( + self, + /, + array: ArrayLike, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + *, + keepdims: Literal[True], + initial: _ScalarLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> NDArray[np.object_]: ... + @overload + def reduce( + self, + /, + array: ArrayLike, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + keepdims: bool = ..., + initial: _ScalarLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _ReturnType_co | NDArray[np.object_]: ... + + @overload + def reduceat( + self, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex, + dtype: DTypeLike, + out: _ArrayType, + /, + ) -> _ArrayType: ... + @overload + def reduceat( + self, + /, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayType | tuple[_ArrayType], + ) -> _ArrayType: ... + @overload + def reduceat( + self, + /, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> NDArray[np.object_]: ... + @overload + def reduceat( + self, + /, + array: _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + ) -> Any: ... 
+ + @overload + def accumulate( + self, + array: ArrayLike, + axis: SupportsIndex, + dtype: DTypeLike, + out: _ArrayType, + /, + ) -> _ArrayType: ... + @overload + def accumulate( + self, + array: ArrayLike, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayType | tuple[_ArrayType], + ) -> _ArrayType: ... + @overload + def accumulate( + self, + /, + array: ArrayLike, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> NDArray[np.object_]: ... + + @overload + def outer( + self, + A: _ScalarLike_co, + B: _ScalarLike_co, + /, *, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co: ... + @overload + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, *, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, *, + out: _ArrayType, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ArrayType: ... + @overload + def outer( + self, + A: _SupportsArrayUFunc, + B: _SupportsArrayUFunc | ArrayLike, + /, *, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Any: ... + @overload + def outer( + self, + A: _ScalarLike_co, + B: _SupportsArrayUFunc | ArrayLike, + /, *, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Any: ... + +@type_check_only +class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> _NIn: ... + @property + def nout(self) -> Literal[1]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + x2: _ScalarLike_co, + x3: _ScalarLike_co, + /, + *xs: _ScalarLike_co, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> _ReturnType_co: ... 
+ @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + /, + *xs: ArrayLike, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + /, + *xs: ArrayLike, + out: _ArrayType | tuple[_ArrayType], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> _ArrayType: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc | ArrayLike, + x2: _SupportsArrayUFunc | ArrayLike, + x3: _SupportsArrayUFunc | ArrayLike, + /, + *xs: _SupportsArrayUFunc | ArrayLike, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> Any: ... + + def at(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + +@type_check_only +class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> _NIn: ... + @property + def nout(self) -> _NOut: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + /, + *xs: _ScalarLike_co, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[_ReturnType_co]: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + *xs: ArrayLike, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[_ReturnType_co | NDArray[np.object_]]: ... 
+ @overload + def __call__( + self, + x1: ArrayLike, + /, + *xs: ArrayLike, + out: _2PTuple[_ArrayType], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[_ArrayType]: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc | ArrayLike, + /, + *xs: _SupportsArrayUFunc | ArrayLike, + out: None | _2PTuple[NDArray[Any]] = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> Any: ... + + def at(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_utils/__init__.py b/pythonProject/.venv/Lib/site-packages/numpy/_utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cc1a36c437be2053364bbdda40bb932a013fe01d --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_utils/__init__.py @@ -0,0 +1,88 @@ +""" +This is a module for defining private helpers which do not depend on the +rest of NumPy. + +Everything in here must be self-contained so that it can be +imported anywhere else without creating circular imports. +If a utility requires the import of NumPy, it probably belongs +in ``numpy._core``. +""" + +import functools +import warnings +from ._convertions import asunicode, asbytes + + +def set_module(module): + """Private decorator for overriding __module__ on a function or class. + + Example usage:: + + @set_module('numpy') + def example(): + pass + + assert example.__module__ == 'numpy' + """ + def decorator(func): + if module is not None: + func.__module__ = module + return func + return decorator + + +def _rename_parameter(old_names, new_names, dep_version=None): + """ + Generate decorator for backward-compatible keyword renaming. 
+ + Apply the decorator generated by `_rename_parameter` to functions with a + renamed parameter to maintain backward-compatibility. + + After decoration, the function behaves as follows: + If only the new parameter is passed into the function, behave as usual. + If only the old parameter is passed into the function (as a keyword), raise + a DeprecationWarning if `dep_version` is provided, and behave as usual + otherwise. + If both old and new parameters are passed into the function, raise a + DeprecationWarning if `dep_version` is provided, and raise the appropriate + TypeError (function got multiple values for argument). + + Parameters + ---------- + old_names : list of str + Old names of parameters + new_name : list of str + New names of parameters + dep_version : str, optional + Version of NumPy in which old parameter was deprecated in the format + 'X.Y.Z'. If supplied, the deprecation message will indicate that + support for the old parameter will be removed in version 'X.Y+2.Z' + + Notes + ----- + Untested with functions that accept *args. Probably won't work as written. + + """ + def decorator(fun): + @functools.wraps(fun) + def wrapper(*args, **kwargs): + __tracebackhide__ = True # Hide traceback for py.test + for old_name, new_name in zip(old_names, new_names): + if old_name in kwargs: + if dep_version: + end_version = dep_version.split('.') + end_version[1] = str(int(end_version[1]) + 2) + end_version = '.'.join(end_version) + msg = (f"Use of keyword argument `{old_name}` is " + f"deprecated and replaced by `{new_name}`. 
" + f"Support for `{old_name}` will be removed " + f"in NumPy {end_version}.") + warnings.warn(msg, DeprecationWarning, stacklevel=2) + if new_name in kwargs: + msg = (f"{fun.__name__}() got multiple values for " + f"argument now known as `{new_name}`") + raise TypeError(msg) + kwargs[new_name] = kwargs.pop(old_name) + return fun(*args, **kwargs) + return wrapper + return decorator diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_utils/__init__.pyi b/pythonProject/.venv/Lib/site-packages/numpy/_utils/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..de68d8d2800e71b6483d6caee740581d8e08a977 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_utils/__init__.pyi @@ -0,0 +1,31 @@ +from collections.abc import Callable, Iterable +from typing import Protocol, overload, type_check_only + +from _typeshed import IdentityFunction +from typing_extensions import TypeVar + +from ._convertions import asbytes as asbytes +from ._convertions import asunicode as asunicode + +### + +_T = TypeVar("_T") +_HasModuleT = TypeVar("_HasModuleT", bound=_HasModule) + +@type_check_only +class _HasModule(Protocol): + __module__: str + +### + +@overload +def set_module(module: None) -> IdentityFunction: ... +@overload +def set_module(module: _HasModuleT) -> _HasModuleT: ... + +# +def _rename_parameter( + old_names: Iterable[str], + new_names: Iterable[str], + dep_version: str | None = None, +) -> Callable[[Callable[..., _T]], Callable[..., _T]]: ... 
diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_utils/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5033180fe4f02e738daaaaa2d34c06392b354596 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_utils/__pycache__/_convertions.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_utils/__pycache__/_convertions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7ab2e9c1c0dad6bf3245ac73fd2f1474cb7d7ec Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_utils/__pycache__/_convertions.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_utils/__pycache__/_inspect.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_utils/__pycache__/_inspect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63361e9d35d0bf0a21bd3fbe8d7d7ea8b26f2d31 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_utils/__pycache__/_inspect.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_utils/__pycache__/_pep440.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/numpy/_utils/__pycache__/_pep440.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d0712873ff30bcc687d744d667dd26ab4ed5fb7 Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/numpy/_utils/__pycache__/_pep440.cpython-310.pyc differ diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_utils/_convertions.py b/pythonProject/.venv/Lib/site-packages/numpy/_utils/_convertions.py new file mode 100644 index 0000000000000000000000000000000000000000..b6c0360c1ce749ef79b103bc32cb1cd78f0745b0 --- /dev/null +++ 
b/pythonProject/.venv/Lib/site-packages/numpy/_utils/_convertions.py @@ -0,0 +1,18 @@ +""" +A set of methods retained from np.compat module that +are still used across codebase. +""" + +__all__ = ["asunicode", "asbytes"] + + +def asunicode(s): + if isinstance(s, bytes): + return s.decode('latin1') + return str(s) + + +def asbytes(s): + if isinstance(s, bytes): + return s + return str(s).encode('latin1') diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_utils/_convertions.pyi b/pythonProject/.venv/Lib/site-packages/numpy/_utils/_convertions.pyi new file mode 100644 index 0000000000000000000000000000000000000000..63167182aa781f549f5f4b67b67e615c43e02c62 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_utils/_convertions.pyi @@ -0,0 +1,4 @@ +__all__ = ["asbytes", "asunicode"] + +def asunicode(s: bytes | str) -> str: ... +def asbytes(s: bytes | str) -> str: ... diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_utils/_inspect.py b/pythonProject/.venv/Lib/site-packages/numpy/_utils/_inspect.py new file mode 100644 index 0000000000000000000000000000000000000000..381ca3940b8aeb41ad1ad34154a1b19beb56dc27 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_utils/_inspect.py @@ -0,0 +1,191 @@ +"""Subset of inspect module from upstream python + +We use this instead of upstream because upstream inspect is slow to import, and +significantly contributes to numpy import times. Importing this copy has almost +no overhead. + +""" +import types + +__all__ = ['getargspec', 'formatargspec'] + +# ----------------------------------------------------------- type-checking +def ismethod(object): + """Return true if the object is an instance method. 
+ + Instance method objects provide these attributes: + __doc__ documentation string + __name__ name with which this method was defined + im_class class object in which this method belongs + im_func function object containing implementation of method + im_self instance to which this method is bound, or None + + """ + return isinstance(object, types.MethodType) + +def isfunction(object): + """Return true if the object is a user-defined function. + + Function objects provide these attributes: + __doc__ documentation string + __name__ name with which this function was defined + func_code code object containing compiled function bytecode + func_defaults tuple of any default values for arguments + func_doc (same as __doc__) + func_globals global namespace in which this function was defined + func_name (same as __name__) + + """ + return isinstance(object, types.FunctionType) + +def iscode(object): + """Return true if the object is a code object. + + Code objects provide these attributes: + co_argcount number of arguments (not including * or ** args) + co_code string of raw compiled bytecode + co_consts tuple of constants used in the bytecode + co_filename name of file in which this code object was created + co_firstlineno number of first line in Python source code + co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg + co_lnotab encoded mapping of line numbers to bytecode indices + co_name name with which this code object was defined + co_names tuple of names of local variables + co_nlocals number of local variables + co_stacksize virtual machine stack space required + co_varnames tuple of names of arguments and local variables + + """ + return isinstance(object, types.CodeType) + +# ------------------------------------------------ argument list extraction +# These constants are from Python's compile.h. +CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8 + +def getargs(co): + """Get information about the arguments accepted by a code object. 
+ + Three things are returned: (args, varargs, varkw), where 'args' is + a list of argument names (possibly containing nested lists), and + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + + """ + + if not iscode(co): + raise TypeError('arg is not a code object') + + nargs = co.co_argcount + names = co.co_varnames + args = list(names[:nargs]) + + # The following acrobatics are for anonymous (tuple) arguments. + # Which we do not need to support, so remove to avoid importing + # the dis module. + for i in range(nargs): + if args[i][:1] in ['', '.']: + raise TypeError("tuple function arguments are not supported") + varargs = None + if co.co_flags & CO_VARARGS: + varargs = co.co_varnames[nargs] + nargs = nargs + 1 + varkw = None + if co.co_flags & CO_VARKEYWORDS: + varkw = co.co_varnames[nargs] + return args, varargs, varkw + +def getargspec(func): + """Get the names and default values of a function's arguments. + + A tuple of four things is returned: (args, varargs, varkw, defaults). + 'args' is a list of the argument names (it may contain nested lists). + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'defaults' is an n-tuple of the default values of the last n arguments. + + """ + + if ismethod(func): + func = func.__func__ + if not isfunction(func): + raise TypeError('arg is not a Python function') + args, varargs, varkw = getargs(func.__code__) + return args, varargs, varkw, func.__defaults__ + +def getargvalues(frame): + """Get information about arguments passed into a particular frame. + + A tuple of four things is returned: (args, varargs, varkw, locals). + 'args' is a list of the argument names (it may contain nested lists). + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'locals' is the locals dictionary of the given frame. 
+ + """ + args, varargs, varkw = getargs(frame.f_code) + return args, varargs, varkw, frame.f_locals + +def joinseq(seq): + if len(seq) == 1: + return '(' + seq[0] + ',)' + else: + return '(' + ', '.join(seq) + ')' + +def strseq(object, convert, join=joinseq): + """Recursively walk a sequence, stringifying each element. + + """ + if type(object) in [list, tuple]: + return join([strseq(_o, convert, join) for _o in object]) + else: + return convert(object) + +def formatargspec(args, varargs=None, varkw=None, defaults=None, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + join=joinseq): + """Format an argument spec from the 4 values returned by getargspec. + + The first four arguments are (args, varargs, varkw, defaults). The + other four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. The ninth + argument is an optional function to format the sequence of arguments. + + """ + specs = [] + if defaults: + firstdefault = len(args) - len(defaults) + for i in range(len(args)): + spec = strseq(args[i], formatarg, join) + if defaults and i >= firstdefault: + spec = spec + formatvalue(defaults[i - firstdefault]) + specs.append(spec) + if varargs is not None: + specs.append(formatvarargs(varargs)) + if varkw is not None: + specs.append(formatvarkw(varkw)) + return '(' + ', '.join(specs) + ')' + +def formatargvalues(args, varargs, varkw, locals, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + join=joinseq): + """Format an argument spec from the 4 values returned by getargvalues. + + The first four arguments are (args, varargs, varkw, locals). The + next four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. 
The ninth + argument is an optional function to format the sequence of arguments. + + """ + def convert(name, locals=locals, + formatarg=formatarg, formatvalue=formatvalue): + return formatarg(name) + formatvalue(locals[name]) + specs = [strseq(arg, convert, join) for arg in args] + + if varargs: + specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) + if varkw: + specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) + return '(' + ', '.join(specs) + ')' diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_utils/_inspect.pyi b/pythonProject/.venv/Lib/site-packages/numpy/_utils/_inspect.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8eba974f839b44e0d772695f0e503c415306dc75 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_utils/_inspect.pyi @@ -0,0 +1,71 @@ +import types +from collections.abc import Callable, Mapping +from typing import Any, Final, TypeAlias, overload + +from _typeshed import SupportsLenAndGetItem +from typing_extensions import TypeIs, TypeVar + +__all__ = ["formatargspec", "getargspec"] + +### + +_T = TypeVar("_T") +_RT = TypeVar("_RT") + +_StrSeq: TypeAlias = SupportsLenAndGetItem[str] +_NestedSeq: TypeAlias = list[_T | _NestedSeq[_T]] | tuple[_T | _NestedSeq[_T], ...] + +_JoinFunc: TypeAlias = Callable[[list[_T]], _T] +_FormatFunc: TypeAlias = Callable[[_T], str] + +### + +CO_OPTIMIZED: Final = 1 +CO_NEWLOCALS: Final = 2 +CO_VARARGS: Final = 4 +CO_VARKEYWORDS: Final = 8 + +### + +def ismethod(object: object) -> TypeIs[types.MethodType]: ... +def isfunction(object: object) -> TypeIs[types.FunctionType]: ... +def iscode(object: object) -> TypeIs[types.CodeType]: ... + +### + +def getargs(co: types.CodeType) -> tuple[list[str], str | None, str | None]: ... +def getargspec(func: types.MethodType | types.FunctionType) -> tuple[list[str], str | None, str | None, tuple[Any, ...]]: ... 
+def getargvalues(frame: types.FrameType) -> tuple[list[str], str | None, str | None, dict[str, Any]]: ... + +# +def joinseq(seq: _StrSeq) -> str: ... + +# +@overload +def strseq(object: _NestedSeq[str], convert: Callable[[Any], Any], join: _JoinFunc[str] = ...) -> str: ... +@overload +def strseq(object: _NestedSeq[_T], convert: Callable[[_T], _RT], join: _JoinFunc[_RT]) -> _RT: ... + +# +def formatargspec( + args: _StrSeq, + varargs: str | None = None, + varkw: str | None = None, + defaults: SupportsLenAndGetItem[object] | None = None, + formatarg: _FormatFunc[str] = ..., # str + formatvarargs: _FormatFunc[str] = ..., # "*{}".format + formatvarkw: _FormatFunc[str] = ..., # "**{}".format + formatvalue: _FormatFunc[object] = ..., # "={!r}".format + join: _JoinFunc[str] = ..., # joinseq +) -> str: ... +def formatargvalues( + args: _StrSeq, + varargs: str | None, + varkw: str | None, + locals: Mapping[str, object] | None, + formatarg: _FormatFunc[str] = ..., # str + formatvarargs: _FormatFunc[str] = ..., # "*{}".format + formatvarkw: _FormatFunc[str] = ..., # "**{}".format + formatvalue: _FormatFunc[object] = ..., # "={!r}".format + join: _JoinFunc[str] = ..., # joinseq +) -> str: ... diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_utils/_pep440.py b/pythonProject/.venv/Lib/site-packages/numpy/_utils/_pep440.py new file mode 100644 index 0000000000000000000000000000000000000000..6f56d60c86179b3b669acd92c307cd95991504ac --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/numpy/_utils/_pep440.py @@ -0,0 +1,487 @@ +"""Utility to compare pep440 compatible version strings. + +The LooseVersion and StrictVersion classes that distutils provides don't +work; they don't recognize anything like alpha/beta/rc/dev versions. +""" + +# Copyright (c) Donald Stufft and individual contributors. +# All rights reserved. 
+ +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: + +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. + +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +import collections +import itertools +import re + + +__all__ = [ + "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN", +] + + +# BEGIN packaging/_structures.py + + +class Infinity: + def __repr__(self): + return "Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return False + + def __le__(self, other): + return False + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return True + + def __ge__(self, other): + return True + + def __neg__(self): + return NegativeInfinity + + +Infinity = Infinity() + + +class NegativeInfinity: + def __repr__(self): + return "-Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return True + + def __le__(self, other): + return True + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return False + + def __ge__(self, other): + return False + + def __neg__(self): + return Infinity + + +# BEGIN packaging/version.py + + +NegativeInfinity = NegativeInfinity() + +_Version = collections.namedtuple( + "_Version", + ["epoch", "release", "dev", "pre", "post", "local"], +) + + +def parse(version): + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. + """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. 
+ """ + + +class _BaseVersion: + + def __hash__(self): + return hash(self._key) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __ge__(self, other): + return self._compare(other, lambda s, o: s >= o) + + def __gt__(self, other): + return self._compare(other, lambda s, o: s > o) + + def __ne__(self, other): + return self._compare(other, lambda s, o: s != o) + + def _compare(self, other, method): + if not isinstance(other, _BaseVersion): + return NotImplemented + + return method(self._key, other._key) + + +class LegacyVersion(_BaseVersion): + + def __init__(self, version): + self._version = str(version) + self._key = _legacy_cmpkey(self._version) + + def __str__(self): + return self._version + + def __repr__(self): + return "".format(repr(str(self))) + + @property + def public(self): + return self._version + + @property + def base_version(self): + return self._version + + @property + def local(self): + return None + + @property + def is_prerelease(self): + return False + + @property + def is_postrelease(self): + return False + + +_legacy_version_component_re = re.compile( + r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE, +) + +_legacy_version_replacement_map = { + "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", +} + + +def _parse_version_parts(s): + for part in _legacy_version_component_re.split(s): + part = _legacy_version_replacement_map.get(part, part) + + if not part or part == ".": + continue + + if part[:1] in "0123456789": + # pad for numeric comparison + yield part.zfill(8) + else: + yield "*" + part + + # ensure that alpha/beta/candidate are before final + yield "*final" + + +def _legacy_cmpkey(version): + # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch + # greater than or equal to 0. 
This will effectively put the LegacyVersion, + # which uses the defacto standard originally implemented by setuptools, + # as before all PEP 440 versions. + epoch = -1 + + # This scheme is taken from pkg_resources.parse_version setuptools prior to + # its adoption of the packaging library. + parts = [] + for part in _parse_version_parts(version.lower()): + if part.startswith("*"): + # remove "-" before a prerelease tag + if part < "*final": + while parts and parts[-1] == "*final-": + parts.pop() + + # remove trailing zeros from each series of numeric parts + while parts and parts[-1] == "00000000": + parts.pop() + + parts.append(part) + parts = tuple(parts) + + return epoch, parts + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +VERSION_PATTERN = r""" + v? + (?: + (?:(?P[0-9]+)!)? # epoch + (?P[0-9]+(?:\.[0-9]+)*) # release segment + (?P
                                          # pre-release
+            [-_\.]?
+            (?P(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P[0-9]+)?
+        )?
+        (?P                                         # post release
+            (?:-(?P[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?Ppost|rev|r)
+                [-_\.]?
+                (?P[0-9]+)?
+            )
+        )?
+        (?P                                          # dev release
+            [-_\.]?
+            (?Pdev)
+            [-_\.]?
+            (?P[0-9]+)?
+        )?
+    )
+    (?:\+(?P[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
class Version(_BaseVersion):
    """A parsed, comparable PEP 440 version string.

    Parsing splits the string into epoch, release, pre/post/dev segments
    and a local segment (stored in a ``_Version`` namedtuple), then builds
    the ``_key`` tuple that ``_BaseVersion`` uses for all comparisons.

    Raises
    ------
    InvalidVersion
        If *version* does not match ``VERSION_PATTERN``.
    """

    # Anchored, case-insensitive form of the module-level VERSION_PATTERN
    # (which is deliberately unanchored so third parties can reuse it).
    _regex = re.compile(
        r"^\s*" + VERSION_PATTERN + r"\s*$",
        re.VERBOSE | re.IGNORECASE,
    )

    def __init__(self, version):
        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion("Invalid version: '{0}'".format(version))

        # Store the parsed out pieces of the version.  Letter/number pairs
        # are normalized by _parse_letter_version ("alpha" -> "a", etc.).
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(
                match.group("pre_l"),
                match.group("pre_n"),
            ),
            post=_parse_letter_version(
                match.group("post_l"),
                # post releases have two spellings: "1.0-1" (post_n1) and
                # "1.0.post1" (post_n2); exactly one of the groups matches.
                match.group("post_n1") or match.group("post_n2"),
            ),
            dev=_parse_letter_version(
                match.group("dev_l"),
                match.group("dev_n"),
            ),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self):
        # BUG FIX: this previously returned "".format(repr(str(self))),
        # whose format string has no placeholder, so the argument was
        # discarded and repr() was always the empty string.  Restore the
        # informative form used by the upstream `packaging` library.
        return "<Version({0})>".format(repr(str(self)))

    def __str__(self):
        """Render the canonical (normalized) form of the version."""
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        # Pre-release
        if self._version.pre is not None:
            parts.append("".join(str(x) for x in self._version.pre))

        # Post-release
        if self._version.post is not None:
            parts.append(".post{0}".format(self._version.post[1]))

        # Development release
        if self._version.dev is not None:
            parts.append(".dev{0}".format(self._version.dev[1]))

        # Local version segment
        if self._version.local is not None:
            parts.append(
                "+{0}".format(".".join(str(x) for x in self._version.local))
            )

        return "".join(parts)

    @property
    def public(self):
        # Everything before the "+" local-segment separator.
        return str(self).split("+", 1)[0]

    @property
    def base_version(self):
        # Epoch + release only; pre/post/dev/local segments are dropped.
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        return "".join(parts)

    @property
    def local(self):
        # The local segment as a string, or None when there is none.
        version_string = str(self)
        if "+" in version_string:
            return version_string.split("+", 1)[1]

    @property
    def is_prerelease(self):
        # Dev releases count as pre-releases for ordering purposes.
        return bool(self._version.dev or self._version.pre)

    @property
    def is_postrelease(self):
        return bool(self._version.post)
+
+
+def _parse_letter_version(letter, number):
+    if letter:
+        # We assume there is an implicit 0 in a pre-release if there is
+        # no numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower-case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume that if we are given a number but not given a letter,
+        # then this is using the implicit post release syntax (e.g., 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+
+_local_version_seperators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_seperators.split(local)
+        )
+
+
def _cmpkey(epoch, release, pre, post, dev, local):
    """Build the tuple used to order parsed PEP 440 versions.

    Missing segments are replaced with the Infinity/NegativeInfinity
    sentinels so that, e.g., versions without a pre-release sort after
    those with one, and versions without a post-release sort before.
    """
    # Compare releases with trailing zeros removed, so that e.g. the
    # release tuples for "1.0" and "1.0.0" produce the same key.
    trimmed = list(release)
    while trimmed and trimmed[-1] == 0:
        trimmed.pop()
    release = tuple(trimmed)

    if pre is None:
        # "Trick" the sort so 1.0.dev0 lands before 1.0a0 by abusing the
        # pre slot — but only when there is no pre- and no post-segment;
        # otherwise the normal rules already order things correctly.
        # Versions without a pre-release otherwise sort after those with one.
        pre = -Infinity if post is None and dev is not None else Infinity

    # No post-segment sorts before any post-segment.
    if post is None:
        post = -Infinity

    # No dev-segment sorts after any dev-segment.
    if dev is None:
        dev = Infinity

    if local is None:
        # Versions without a local segment should sort before those with one.
        local = -Infinity
    else:
        # Per PEP 440: numeric local parts sort numerically and after
        # alphanumeric parts; alphanumeric parts sort lexicographically.
        # Pairing each part accordingly encodes those rules in tuple order.
        local = tuple(
            (part, "") if isinstance(part, int) else (-Infinity, part)
            for part in local
        )

    return epoch, release, pre, post, dev, local
diff --git a/pythonProject/.venv/Lib/site-packages/numpy/_utils/_pep440.pyi b/pythonProject/.venv/Lib/site-packages/numpy/_utils/_pep440.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..42c46405dd3717a0c3ef04fa98c88786b93e379f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/numpy/_utils/_pep440.pyi
@@ -0,0 +1,121 @@
+import re
+from collections.abc import Callable
+from typing import (
+    Any,
+    ClassVar,
+    Final,
+    Generic,
+    NamedTuple,
+    TypeVar,
+    final,
+    type_check_only,
+)
+from typing import (
+    Literal as L,
+)
+
+from typing_extensions import TypeIs
+
+__all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"]
+
+###
+
+_CmpKeyT = TypeVar("_CmpKeyT", bound=tuple[object, ...])
+_CmpKeyT_co = TypeVar("_CmpKeyT_co", bound=tuple[object, ...], default=tuple[Any, ...], covariant=True)
+
+###
+
+VERSION_PATTERN: Final[str] = ...
+
+class InvalidVersion(ValueError): ...
+
+@type_check_only
+@final
+class _InfinityType:
+    """Stub for the sentinel object that compares greater than everything.
+
+    Mirrors the runtime ``Infinity`` singleton used in the PEP 440 sort key.
+    """
+
+    def __hash__(self) -> int: ...
+    def __eq__(self, other: object, /) -> TypeIs[_InfinityType]: ...
+    def __ne__(self, other: object, /) -> bool: ...
+    # Ordering is statically fixed: never less-than, always greater-than.
+    def __lt__(self, other: object, /) -> L[False]: ...
+    def __le__(self, other: object, /) -> L[False]: ...
+    def __gt__(self, other: object, /) -> L[True]: ...
+    def __ge__(self, other: object, /) -> L[True]: ...
+    def __neg__(self) -> _NegativeInfinityType: ...
+
+Infinity: Final[_InfinityType] = ...
+
+@type_check_only
+@final
+class _NegativeInfinityType:
+    """Stub for the sentinel object that compares less than everything.
+
+    Mirrors the runtime ``NegativeInfinity`` singleton; it is the dual of
+    ``_InfinityType`` (note the flipped Literal return types below).
+    """
+
+    def __hash__(self) -> int: ...
+    def __eq__(self, other: object, /) -> TypeIs[_NegativeInfinityType]: ...
+    def __ne__(self, other: object, /) -> bool: ...
+    # Ordering is statically fixed: always less-than, never greater-than.
+    def __lt__(self, other: object, /) -> L[True]: ...
+    def __le__(self, other: object, /) -> L[True]: ...
+    def __gt__(self, other: object, /) -> L[False]: ...
+    def __ge__(self, other: object, /) -> L[False]: ...
+    def __neg__(self) -> _InfinityType: ...
+
+NegativeInfinity: Final[_NegativeInfinityType] = ...
+
+class _Version(NamedTuple):
+    """Parsed components of a PEP 440 version string.
+
+    NOTE(review): field order here (dev before pre/post) presumably matches
+    the runtime namedtuple declaration in ``_pep440.py`` — confirm against it.
+    """
+
+    epoch: int
+    release: tuple[int, ...]
+    dev: tuple[str, int] | None
+    pre: tuple[str, int] | None
+    post: tuple[str, int] | None
+    local: tuple[str | int, ...] | None
+
+class _BaseVersion(Generic[_CmpKeyT_co]):
+    """Base class whose comparisons delegate to the precomputed ``_key`` tuple."""
+
+    _key: _CmpKeyT_co
+    def __hash__(self) -> int: ...
+    # __eq__/__ne__ intentionally narrow ``other`` to _BaseVersion, which is
+    # incompatible with object.__eq__ — hence the suppression comments.
+    def __eq__(self, other: _BaseVersion, /) -> bool: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    def __ne__(self, other: _BaseVersion, /) -> bool: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    def __lt__(self, other: _BaseVersion, /) -> bool: ...
+    def __le__(self, other: _BaseVersion, /) -> bool: ...
+    def __ge__(self, other: _BaseVersion, /) -> bool: ...
+    def __gt__(self, other: _BaseVersion, /) -> bool: ...
+    # Shared comparison driver: applies ``method`` to the two sort keys.
+    def _compare(self, /, other: _BaseVersion[_CmpKeyT], method: Callable[[_CmpKeyT_co, _CmpKeyT], bool]) -> bool: ...
+
+class LegacyVersion(_BaseVersion[tuple[L[-1], tuple[str, ...]]]):
+    """Stub for versions that do not conform to PEP 440.
+
+    The sort key starts with the literal ``-1`` epoch so legacy versions
+    always order before PEP 440 versions; ``is_prerelease``/``is_postrelease``
+    are statically False and ``local`` is always None.
+    """
+
+    _version: Final[str]
+    def __init__(self, /, version: str) -> None: ...
+    @property
+    def public(self) -> str: ...
+    @property
+    def base_version(self) -> str: ...
+    @property
+    def local(self) -> None: ...
+    @property
+    def is_prerelease(self) -> L[False]: ...
+    @property
+    def is_postrelease(self) -> L[False]: ...
+
+class Version(
+    _BaseVersion[
+        tuple[
+            int,  # epoch
+            tuple[int, ...],  # release
+            tuple[str, int] | _InfinityType | _NegativeInfinityType,  # pre
+            tuple[str, int] | _NegativeInfinityType,  # post
+            tuple[str, int] | _InfinityType,  # dev
+            tuple[tuple[int, L[""]] | tuple[_NegativeInfinityType, str], ...] | _NegativeInfinityType,  # local
+        ],
+    ],
+):
+    """Stub for a PEP 440-conformant version.
+
+    The generic argument spells out the exact sort-key tuple shape, including
+    the Infinity/NegativeInfinity sentinels substituted for missing segments.
+    """
+
+    _regex: ClassVar[re.Pattern[str]] = ...
+    _version: Final[str]
+
+    def __init__(self, /, version: str) -> None: ...
+    @property
+    def public(self) -> str: ...
+    @property
+    def base_version(self) -> str: ...
+    @property
+    def local(self) -> str | None: ...
+    @property
+    def is_prerelease(self) -> bool: ...
+    @property
+    def is_postrelease(self) -> bool: ...
+
+# Parse ``version`` into a Version, falling back to LegacyVersion when it is
+# not PEP 440-conformant.
+def parse(version: str) -> Version | LegacyVersion: ...
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/__init__.py b/pythonProject/.venv/Lib/site-packages/onnx/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8a28e576e0e18d9742347dfe5a63247d230ba50
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/__init__.py
@@ -0,0 +1,370 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import annotations
+
+__all__ = [
+    # Constants
+    "ONNX_ML",
+    "IR_VERSION",
+    "IR_VERSION_2017_10_10",
+    "IR_VERSION_2017_10_30",
+    "IR_VERSION_2017_11_3",
+    "IR_VERSION_2019_1_22",
+    "IR_VERSION_2019_3_18",
+    "IR_VERSION_2019_9_19",
+    "IR_VERSION_2020_5_8",
+    "IR_VERSION_2021_7_30",
+    "IR_VERSION_2023_5_5",
+    "IR_VERSION_2024_3_25",
+    "EXPERIMENTAL",
+    "STABLE",
+    # Modules
+    "checker",
+    "compose",
+    "defs",
+    "gen_proto",
+    "helper",
+    "hub",
+    "numpy_helper",
+    "parser",
+    "printer",
+    "shape_inference",
+    "utils",
+    "version_converter",
+    # Proto classes
+    "AttributeProto",
+    "DeviceConfigurationProto",
+    "FunctionProto",
+    "GraphProto",
+    "IntIntListEntryProto",
+    "MapProto",
+    "ModelProto",
+    "NodeDeviceConfigurationProto",
+    "NodeProto",
+    "OperatorProto",
+    "OperatorSetIdProto",
+    "OperatorSetProto",
+    "OperatorStatus",
+    "OptionalProto",
+    "SequenceProto",
+    "SimpleShardedDimProto",
+    "ShardedDimProto",
+    "ShardingSpecProto",
+    "SparseTensorProto",
+    "StringStringEntryProto",
+    "TensorAnnotation",
+    "TensorProto",
+    "TensorShapeProto",
+    "TrainingInfoProto",
+    "TypeProto",
+    "ValueInfoProto",
+    "Version",
+    # Utility functions
+    "convert_model_to_external_data",
+    "load_external_data_for_model",
+    "load_model_from_string",
+    "load_model",
+    "load_tensor_from_string",
+    "load_tensor",
+    "save_model",
+    "save_tensor",
+    "write_external_data_tensors",
+]
+# isort:skip_file
+
+import os
+import typing
+from typing import IO, Literal, Union
+
+
+from onnx import serialization
+from onnx.onnx_cpp2py_export import ONNX_ML
+from onnx.external_data_helper import (
+    load_external_data_for_model,
+    write_external_data_tensors,
+    convert_model_to_external_data,
+)
+from onnx.onnx_pb import (
+    AttributeProto,
+    DeviceConfigurationProto,
+    EXPERIMENTAL,
+    FunctionProto,
+    GraphProto,
+    IntIntListEntryProto,
+    IR_VERSION,
+    IR_VERSION_2017_10_10,
+    IR_VERSION_2017_10_30,
+    IR_VERSION_2017_11_3,
+    IR_VERSION_2019_1_22,
+    IR_VERSION_2019_3_18,
+    IR_VERSION_2019_9_19,
+    IR_VERSION_2020_5_8,
+    IR_VERSION_2021_7_30,
+    IR_VERSION_2023_5_5,
+    IR_VERSION_2024_3_25,
+    ModelProto,
+    NodeDeviceConfigurationProto,
+    NodeProto,
+    OperatorSetIdProto,
+    OperatorStatus,
+    STABLE,
+    SimpleShardedDimProto,
+    ShardedDimProto,
+    ShardingSpecProto,
+    SparseTensorProto,
+    StringStringEntryProto,
+    TensorAnnotation,
+    TensorProto,
+    TensorShapeProto,
+    TrainingInfoProto,
+    TypeProto,
+    ValueInfoProto,
+    Version,
+)
+from onnx.onnx_operators_pb import OperatorProto, OperatorSetProto
+from onnx.onnx_data_pb import MapProto, OptionalProto, SequenceProto
+import onnx.version
+
+# Import common subpackages so they're available when you 'import onnx'
+from onnx import (
+    checker,
+    compose,
+    defs,
+    gen_proto,
+    helper,
+    hub,
+    numpy_helper,
+    parser,
+    printer,
+    shape_inference,
+    utils,
+    version_converter,
+)
+
+# Package version string, re-exported from the generated onnx.version module.
+__version__ = onnx.version.version
+
+# Supported model formats that can be loaded from and saved to
+# The literals are formats with built-in support. But we also allow users to
+# register their own formats. So we allow str as well.
+_SupportedFormat = Union[
+    Literal["protobuf", "textproto", "onnxtxt", "json"], str  # noqa: PYI051
+]
+# Default serialization format
+_DEFAULT_FORMAT = "protobuf"
+
+
+def _load_bytes(f: IO[bytes] | str | os.PathLike) -> bytes:
+    """Read all bytes from a file-like object or from a path.
+
+    A "file-like" object is anything with a callable ``read``; everything
+    else is treated as a filesystem path and opened in binary mode.
+    """
+    if hasattr(f, "read") and callable(typing.cast("IO[bytes]", f).read):
+        content = typing.cast("IO[bytes]", f).read()
+    else:
+        f = typing.cast("Union[str, os.PathLike]", f)
+        with open(f, "rb") as readable:
+            content = readable.read()
+    return content
+
+
+def _save_bytes(content: bytes, f: IO[bytes] | str | os.PathLike) -> None:
+    """Write ``content`` to a file-like object or to a path.
+
+    Mirrors ``_load_bytes``: objects with a callable ``write`` are written to
+    directly; anything else is opened as a binary file path.
+    """
+    if hasattr(f, "write") and callable(typing.cast("IO[bytes]", f).write):
+        typing.cast("IO[bytes]", f).write(content)
+    else:
+        f = typing.cast("Union[str, os.PathLike]", f)
+        with open(f, "wb") as writable:
+            writable.write(content)
+
+
+def _get_file_path(f: IO[bytes] | str | os.PathLike | None) -> str | None:
+    """Return the absolute path for ``f``, or None if no path can be derived.
+
+    Paths are resolved directly; open file objects contribute their ``name``
+    attribute when present (e.g. objects returned by ``open``).
+    """
+    if isinstance(f, (str, os.PathLike)):
+        return os.path.abspath(f)
+    if hasattr(f, "name"):
+        assert f is not None
+        return os.path.abspath(f.name)
+    return None
+
+
+def _get_serializer(
+    fmt: _SupportedFormat | None, f: str | os.PathLike | IO[bytes] | None = None
+) -> serialization.ProtoSerializer:
+    """Get the serializer for the given path and format from the serialization registry.
+
+    Resolution order: explicit ``fmt`` wins; otherwise the file extension of
+    ``f`` (when it is a path) is consulted; finally the protobuf default.
+    """
+    # Use fmt if it is specified
+    if fmt is not None:
+        return serialization.registry.get(fmt)
+
+    if (file_path := _get_file_path(f)) is not None:
+        _, ext = os.path.splitext(file_path)
+        fmt = serialization.registry.get_format_from_file_extension(ext)
+
+    # Failed to resolve format if fmt is None. Use protobuf as default
+    fmt = fmt or _DEFAULT_FORMAT
+    assert fmt is not None
+
+    return serialization.registry.get(fmt)
+
+
+def load_model(
+    f: IO[bytes] | str | os.PathLike,
+    format: _SupportedFormat | None = None,  # noqa: A002
+    load_external_data: bool = True,
+) -> ModelProto:
+    """Loads a serialized ModelProto into memory.
+
+    Args:
+        f: can be a file-like object (has "read" function) or a string/PathLike containing a file name
+        format: The serialization format. When it is not specified, it is inferred
+            from the file extension when ``f`` is a path. If not specified _and_
+            ``f`` is not a path, 'protobuf' is used. The encoding is assumed to
+            be "utf-8" when the format is a text format.
+        load_external_data: Whether to load the external data.
+            Set to True if the data is under the same directory of the model.
+            If not, users need to call :func:`load_external_data_for_model`
+            with directory to load external data from.
+
+    Returns:
+        Loaded in-memory ModelProto.
+    """
+    model = _get_serializer(format, f).deserialize_proto(_load_bytes(f), ModelProto())
+
+    if load_external_data:
+        # External data paths are stored relative to the model file, so the
+        # model's own directory is the base for resolution. When ``f`` has no
+        # path (e.g. an in-memory stream) external data cannot be located here.
+        model_filepath = _get_file_path(f)
+        if model_filepath:
+            base_dir = os.path.dirname(model_filepath)
+            load_external_data_for_model(model, base_dir)
+
+    return model
+
+
+def load_tensor(
+    f: IO[bytes] | str | os.PathLike,
+    format: _SupportedFormat | None = None,  # noqa: A002
+) -> TensorProto:
+    """Loads a serialized TensorProto into memory.
+
+    Args:
+        f: can be a file-like object (has "read" function) or a string/PathLike containing a file name
+        format: The serialization format. When it is not specified, it is inferred
+            from the file extension when ``f`` is a path. If not specified _and_
+            ``f`` is not a path, 'protobuf' is used. The encoding is assumed to
+            be "utf-8" when the format is a text format.
+
+    Returns:
+        Loaded in-memory TensorProto.
+    """
+    # Unlike load_model, no external-data resolution is performed here.
+    return _get_serializer(format, f).deserialize_proto(_load_bytes(f), TensorProto())
+
+
+def load_model_from_string(
+    s: bytes | str,
+    format: _SupportedFormat = _DEFAULT_FORMAT,  # noqa: A002
+) -> ModelProto:
+    """Loads a binary string (bytes) that contains serialized ModelProto.
+
+    Args:
+        s: a string, which contains serialized ModelProto
+        format: The serialization format. Defaults to 'protobuf'; there is no
+            file path here, so no extension-based inference takes place. The
+            encoding is assumed to be "utf-8" when the format is a text format.
+
+    Returns:
+        Loaded in-memory ModelProto.
+    """
+    return _get_serializer(format).deserialize_proto(s, ModelProto())
+
+
+def load_tensor_from_string(
+    s: bytes,
+    format: _SupportedFormat = _DEFAULT_FORMAT,  # noqa: A002
+) -> TensorProto:
+    """Loads a binary string (bytes) that contains serialized TensorProto.
+
+    Args:
+        s: a string, which contains serialized TensorProto
+        format: The serialization format. Defaults to 'protobuf'; there is no
+            file path here, so no extension-based inference takes place. The
+            encoding is assumed to be "utf-8" when the format is a text format.
+
+    Returns:
+        Loaded in-memory TensorProto.
+    """
+    return _get_serializer(format).deserialize_proto(s, TensorProto())
+
+
+def save_model(
+    proto: ModelProto | bytes,
+    f: IO[bytes] | str | os.PathLike,
+    format: _SupportedFormat | None = None,  # noqa: A002
+    *,
+    save_as_external_data: bool = False,
+    all_tensors_to_one_file: bool = True,
+    location: str | None = None,
+    size_threshold: int = 1024,
+    convert_attribute: bool = False,
+) -> None:
+    """Saves the ModelProto to the specified path and optionally, serialize tensors with raw data as external data before saving.
+
+    Args:
+        proto: should be a in-memory ModelProto
+        f: can be a file-like object (has "write" function) or a string containing
+            a file name or a pathlike object
+        format: The serialization format. When it is not specified, it is inferred
+            from the file extension when ``f`` is a path. If not specified _and_
+            ``f`` is not a path, 'protobuf' is used. The encoding is assumed to
+            be "utf-8" when the format is a text format.
+        save_as_external_data: If true, save tensors to external file(s).
+        all_tensors_to_one_file: Effective only if save_as_external_data is True.
+            If true, save all tensors to one external file specified by location.
+            If false, save each tensor to a file named with the tensor name.
+        location: Effective only if save_as_external_data is true.
+            Specify the external file that all tensors to save to.
+            Path is relative to the model path.
+            If not specified, will use the model name.
+        size_threshold: Effective only if save_as_external_data is True.
+            Threshold for size of data. Only when tensor's data is >= the size_threshold it will be converted
+            to external data. To convert every tensor with raw data to external data set size_threshold=0.
+        convert_attribute: Effective only if save_as_external_data is True.
+            If true, convert all tensors to external data
+            If false, convert only non-attribute tensors to external data
+    """
+    # Accept pre-serialized protobuf bytes by round-tripping them into a proto.
+    if isinstance(proto, bytes):
+        proto = _get_serializer(_DEFAULT_FORMAT).deserialize_proto(proto, ModelProto())
+
+    if save_as_external_data:
+        convert_model_to_external_data(
+            proto, all_tensors_to_one_file, location, size_threshold, convert_attribute
+        )
+
+    # External tensor files are written relative to the model's directory,
+    # so this only happens when ``f`` yields a concrete filesystem path.
+    model_filepath = _get_file_path(f)
+    if model_filepath is not None:
+        basepath = os.path.dirname(model_filepath)
+        proto = write_external_data_tensors(proto, basepath)
+
+    serialized = _get_serializer(format, model_filepath).serialize_proto(proto)
+    _save_bytes(serialized, f)
+
+
+def save_tensor(
+    proto: TensorProto,
+    f: IO[bytes] | str | os.PathLike,
+    format: _SupportedFormat | None = None,  # noqa: A002
+) -> None:
+    """Saves the TensorProto to the specified path.
+
+    Args:
+        proto: should be a in-memory TensorProto
+        f: can be a file-like object (has "write" function) or a string
+            containing a file name or a pathlike object.
+        format: The serialization format. When it is not specified, it is inferred
+            from the file extension when ``f`` is a path. If not specified _and_
+            ``f`` is not a path, 'protobuf' is used. The encoding is assumed to
+            be "utf-8" when the format is a text format.
+    """
+    serialized = _get_serializer(format, f).serialize_proto(proto)
+    _save_bytes(serialized, f)
+
+
+# For backward compatibility: legacy names kept so older code that used
+# onnx.load / onnx.load_from_string / onnx.save continues to work.
+load = load_model
+load_from_string = load_model_from_string
+save = save_model
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/_mapping.py b/pythonProject/.venv/Lib/site-packages/onnx/_mapping.py
new file mode 100644
index 0000000000000000000000000000000000000000..110fc571504df0d246a2cb406bed06978c76aba6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/_mapping.py
@@ -0,0 +1,113 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from typing import NamedTuple
+
+import ml_dtypes
+import numpy as np
+
+from onnx.onnx_pb import TensorProto
+
+
+class TensorDtypeMap(NamedTuple):
+    """Mapping entry describing how one TensorProto dtype is represented."""
+
+    # Corresponding numpy dtype (may come from ml_dtypes for low-precision types).
+    np_dtype: np.dtype
+    # TensorProto enum value of the *_data field used to store this dtype.
+    storage_dtype: int
+    # Human-readable name, e.g. "TensorProto.FLOAT".
+    name: str
+
+
+# tensor_dtype: (numpy type, storage type, string name)
+# The storage type is the type used to store the tensor in the *_data field of
+# a TensorProto. All available fields are float_data, int32_data, int64_data,
+# string_data, uint64_data and double_data.
+TENSOR_TYPE_MAP: dict[int, TensorDtypeMap] = {
+    # Standard numpy-native types.
+    int(TensorProto.FLOAT): TensorDtypeMap(
+        np.dtype("float32"), int(TensorProto.FLOAT), "TensorProto.FLOAT"
+    ),
+    int(TensorProto.UINT8): TensorDtypeMap(
+        np.dtype("uint8"), int(TensorProto.INT32), "TensorProto.UINT8"
+    ),
+    int(TensorProto.INT8): TensorDtypeMap(
+        np.dtype("int8"), int(TensorProto.INT32), "TensorProto.INT8"
+    ),
+    int(TensorProto.UINT16): TensorDtypeMap(
+        np.dtype("uint16"), int(TensorProto.INT32), "TensorProto.UINT16"
+    ),
+    int(TensorProto.INT16): TensorDtypeMap(
+        np.dtype("int16"), int(TensorProto.INT32), "TensorProto.INT16"
+    ),
+    int(TensorProto.INT32): TensorDtypeMap(
+        np.dtype("int32"), int(TensorProto.INT32), "TensorProto.INT32"
+    ),
+    int(TensorProto.INT64): TensorDtypeMap(
+        np.dtype("int64"), int(TensorProto.INT64), "TensorProto.INT64"
+    ),
+    int(TensorProto.BOOL): TensorDtypeMap(
+        np.dtype("bool"), int(TensorProto.INT32), "TensorProto.BOOL"
+    ),
+    int(TensorProto.FLOAT16): TensorDtypeMap(
+        np.dtype("float16"), int(TensorProto.INT32), "TensorProto.FLOAT16"
+    ),
+    # bfloat16 has no numpy-native dtype; ml_dtypes supplies it.
+    int(TensorProto.BFLOAT16): TensorDtypeMap(
+        np.dtype(ml_dtypes.bfloat16),
+        int(TensorProto.INT32),
+        "TensorProto.BFLOAT16",
+    ),
+    int(TensorProto.DOUBLE): TensorDtypeMap(
+        np.dtype("float64"), int(TensorProto.DOUBLE), "TensorProto.DOUBLE"
+    ),
+    # Complex values are stored as interleaved float/double components.
+    int(TensorProto.COMPLEX64): TensorDtypeMap(
+        np.dtype("complex64"), int(TensorProto.FLOAT), "TensorProto.COMPLEX64"
+    ),
+    int(TensorProto.COMPLEX128): TensorDtypeMap(
+        np.dtype("complex128"),
+        int(TensorProto.DOUBLE),
+        "TensorProto.COMPLEX128",
+    ),
+    int(TensorProto.UINT32): TensorDtypeMap(
+        np.dtype("uint32"), int(TensorProto.UINT64), "TensorProto.UINT32"
+    ),
+    int(TensorProto.UINT64): TensorDtypeMap(
+        np.dtype("uint64"), int(TensorProto.UINT64), "TensorProto.UINT64"
+    ),
+    int(TensorProto.STRING): TensorDtypeMap(
+        np.dtype("object"), int(TensorProto.STRING), "TensorProto.STRING"
+    ),
+    # Low-precision float8/int4/float4 formats, all backed by ml_dtypes and
+    # stored in the int32_data field.
+    int(TensorProto.FLOAT8E4M3FN): TensorDtypeMap(
+        np.dtype(ml_dtypes.float8_e4m3fn),
+        int(TensorProto.INT32),
+        "TensorProto.FLOAT8E4M3FN",
+    ),
+    int(TensorProto.FLOAT8E4M3FNUZ): TensorDtypeMap(
+        np.dtype(ml_dtypes.float8_e4m3fnuz),
+        int(TensorProto.INT32),
+        "TensorProto.FLOAT8E4M3FNUZ",
+    ),
+    int(TensorProto.FLOAT8E5M2): TensorDtypeMap(
+        np.dtype(ml_dtypes.float8_e5m2),
+        int(TensorProto.INT32),
+        "TensorProto.FLOAT8E5M2",
+    ),
+    int(TensorProto.FLOAT8E5M2FNUZ): TensorDtypeMap(
+        np.dtype(ml_dtypes.float8_e5m2fnuz),
+        int(TensorProto.INT32),
+        "TensorProto.FLOAT8E5M2FNUZ",
+    ),
+    int(TensorProto.UINT4): TensorDtypeMap(
+        np.dtype(ml_dtypes.uint4), int(TensorProto.INT32), "TensorProto.UINT4"
+    ),
+    int(TensorProto.INT4): TensorDtypeMap(
+        np.dtype(ml_dtypes.int4), int(TensorProto.INT32), "TensorProto.INT4"
+    ),
+    int(TensorProto.FLOAT4E2M1): TensorDtypeMap(
+        np.dtype(ml_dtypes.float4_e2m1fn),
+        int(TensorProto.INT32),
+        "TensorProto.FLOAT4E2M1",
+    ),
+    int(TensorProto.FLOAT8E8M0): TensorDtypeMap(
+        np.dtype(ml_dtypes.float8_e8m0fnu),
+        int(TensorProto.INT32),
+        "TensorProto.FLOAT8E8M0",
+    ),
+}
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/__init__.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..de8e3a385434d6c541b8b544342983b60f8eb0c3
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/__init__.py
@@ -0,0 +1,3 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/base.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e77c9fd4bc3971e92ab0d6e0f2ea0bc09df1106
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/base.py
@@ -0,0 +1,148 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from collections import namedtuple
+from typing import TYPE_CHECKING, Any, NewType
+
+import onnx.checker
+import onnx.onnx_cpp2py_export.checker as c_checker
+from onnx import IR_VERSION, ModelProto, NodeProto
+
+if TYPE_CHECKING:
+    from collections.abc import Sequence
+
+    import numpy
+
+
+class DeviceType:
+    """Describes device type."""
+
+    # NewType over int so device-type constants are distinguishable from
+    # plain ints by type checkers.
+    _Type = NewType("_Type", int)
+    CPU: _Type = _Type(0)
+    CUDA: _Type = _Type(1)
+
+
+class Device:
+    """Describes device type and device id
+    syntax: device_type:device_id(optional)
+    example: 'CPU', 'CUDA', 'CUDA:1'
+    """
+
+    def __init__(self, device: str) -> None:
+        # Split "TYPE[:id]"; the TYPE part must name an attribute of
+        # DeviceType (e.g. "CPU", "CUDA") or getattr raises AttributeError.
+        options = device.split(":")
+        self.type = getattr(DeviceType, options[0])
+        # Device id defaults to 0 when the ":id" suffix is omitted.
+        self.device_id = 0
+        if len(options) > 1:
+            self.device_id = int(options[1])
+
+
+def namedtupledict(
+    typename: str, field_names: Sequence[str], *args: Any, **kwargs: Any
+) -> type[tuple[Any, ...]]:
+    """Build a namedtuple type whose __getitem__ also accepts field names.
+
+    The returned class behaves like a regular namedtuple, but indexing with a
+    string looks the field up by its original name — useful because invalid
+    identifiers (e.g. "0") are renamed by namedtuple's ``rename=True``.
+    """
+    # Map original field names to positions before namedtuple renames them.
+    field_names_map = {n: i for i, n in enumerate(field_names)}
+    # Some output names are invalid python identifier, e.g. "0"
+    kwargs.setdefault("rename", True)
+    data = namedtuple(typename, field_names, *args, **kwargs)  # type: ignore  # noqa: PYI024
+
+    def getitem(self: Any, key: Any) -> Any:
+        # String keys are translated to their positional index; ints and
+        # slices fall through to tuple indexing unchanged.
+        if isinstance(key, str):
+            key = field_names_map[key]
+        return super(type(self), self).__getitem__(key)  # type: ignore
+
+    data.__getitem__ = getitem  # type: ignore[assignment]
+    return data
+
+
+class BackendRep:
+    """BackendRep is the handle that a Backend returns after preparing to execute
+    a model repeatedly. Users will then pass inputs to the run function of
+    BackendRep to retrieve the corresponding results.
+    """
+
+    def run(self, inputs: Any, **kwargs: Any) -> tuple[Any, ...]:  # noqa: ARG002
+        """Abstract function; subclasses execute the model and return outputs."""
+        # Placeholder result — concrete backends override this method.
+        return (None,)
+
+
+class Backend:
+    """Backend is the entity that will take an ONNX model with inputs,
+    perform a computation, and then return the output.
+
+    For one-off execution, users can use run_node and run_model to obtain results quickly.
+
+    For repeated execution, users should use prepare, in which the Backend
+    does all of the preparation work for executing the model repeatedly
+    (e.g., loading initializers), and returns a BackendRep handle.
+    """
+
+    @classmethod
+    def is_compatible(
+        cls,
+        model: ModelProto,  # noqa: ARG003
+        device: str = "CPU",  # noqa: ARG003
+        **kwargs: Any,  # noqa: ARG003
+    ) -> bool:
+        # Return whether the model is compatible with the backend.
+        # Base implementation accepts everything; subclasses may restrict.
+        return True
+
+    @classmethod
+    def prepare(
+        cls,
+        model: ModelProto,
+        device: str = "CPU",  # noqa: ARG003
+        **kwargs: Any,  # noqa: ARG003
+    ) -> BackendRep | None:
+        """Validate the model; subclasses extend this to build a BackendRep."""
+        # TODO Remove Optional from return type
+        onnx.checker.check_model(model)
+        return None
+
+    @classmethod
+    def run_model(
+        cls, model: ModelProto, inputs: Any, device: str = "CPU", **kwargs: Any
+    ) -> tuple[Any, ...]:
+        """One-shot convenience: prepare the model, then run it once."""
+        backend = cls.prepare(model, device, **kwargs)
+        # Subclasses are expected to return a real BackendRep from prepare.
+        assert backend is not None
+        return backend.run(inputs)
+
+    @classmethod
+    def run_node(
+        cls,
+        node: NodeProto,
+        inputs: Any,  # noqa: ARG003
+        device: str = "CPU",  # noqa: ARG003
+        outputs_info: (  # noqa: ARG003
+            Sequence[tuple[numpy.dtype, tuple[int, ...]]] | None
+        ) = None,
+        **kwargs: dict[str, Any],
+    ) -> tuple[Any, ...] | None:
+        """Simple run one operator and return the results.
+
+        Args:
+            node: The node proto.
+            inputs: Inputs to the node.
+            device: The device to run on.
+            outputs_info: a list of tuples, which contains the element type and
+                shape of each output. First element of the tuple is the dtype, and
+                the second element is the shape. More use case can be found in
+                https://github.com/onnx/onnx/blob/main/onnx/backend/test/runner/__init__.py
+            kwargs: Other keyword arguments.
+        """
+        # TODO Remove Optional from return type
+        # Base implementation only checks the node; an explicit opset_version
+        # requires building a CheckerContext with that opset pinned.
+        if "opset_version" in kwargs:
+            special_context = c_checker.CheckerContext()
+            special_context.ir_version = IR_VERSION
+            special_context.opset_imports = {"": kwargs["opset_version"]}  # type: ignore
+            onnx.checker.check_node(node, special_context)
+        else:
+            onnx.checker.check_node(node)
+
+        return None
+
+    @classmethod
+    def supports_device(cls, device: str) -> bool:  # noqa: ARG003
+        """Checks whether the backend is compiled with particular device support.
+        In particular it's used in the testing suite.
+        """
+        return True
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/__init__.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1772a77dcab736dd48287b71f55345158068c13f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/__init__.py
@@ -0,0 +1,3 @@
+# Copyright (c) ONNX Project Contributors
+
+# SPDX-License-Identifier: Apache-2.0
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c2323278d921e594d2025900d0614e83edc9f31
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/__pycache__/__init__.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/ops/__init__.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/ops/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe1ffcf1ef81998b3f003d1cfc5c4a28d05b7ebf
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/ops/__init__.py
@@ -0,0 +1,30 @@
+# Copyright (c) ONNX Project Contributors
+
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import importlib
+import inspect
+import pkgutil
+import sys
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from types import ModuleType
+
+
+def collect_sample_implementations() -> dict[str, str]:
+    """Return a mapping of module name -> source text for every sample op.
+
+    Recursively imports all modules under this package and captures their
+    source via inspect.getsource.
+    """
+    dict_: dict[str, str] = {}
+    _recursive_scan(sys.modules[__name__], dict_)
+    return dict_
+
+
+def _recursive_scan(package: ModuleType, dict_: dict[str, str]) -> None:
+    """Walk ``package`` recursively, storing each module's source in ``dict_``.
+
+    Keys are bare module names (not dotted paths), so two modules with the
+    same name in different subpackages would overwrite each other.
+    """
+    pkg_dir = package.__path__
+    module_location = package.__name__
+    for _module_loader, name, ispkg in pkgutil.iter_modules(pkg_dir):
+        module_name = f"{module_location}.{name}"  # Module/package
+        module = importlib.import_module(module_name)
+        dict_[name] = inspect.getsource(module)
+        # Descend into subpackages to pick up their modules too.
+        if ispkg:
+            _recursive_scan(module, dict_)
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/ops/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/ops/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..47771dae32510b7bc6cf3479a5413fc62bdb72a7
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/ops/__pycache__/__init__.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/ops/__pycache__/abs.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/ops/__pycache__/abs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5a7dbd48fcc0a156077a665944a75d1f853a3b07
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/ops/__pycache__/abs.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/ops/abs.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/ops/abs.py
new file mode 100644
index 0000000000000000000000000000000000000000..683743cb7e9fe82b7657c45862ea135dbab3075a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/sample/ops/abs.py
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+
+def abs(input: np.ndarray) -> np.ndarray:  # noqa: A001
+    """Sample implementation of the ONNX Abs operator (element-wise |x|)."""
+    return np.abs(input)  # type: ignore[no-any-return]
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/__init__.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cf51d22689cc036a425fa1049450031b59d99dd
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/__init__.py
@@ -0,0 +1,8 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+__all__ = ["BackendTest"]
+# for backward compatibility
+from onnx.backend.test.runner import Runner as BackendTest
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/__init__.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1082362ad99a85db7e6b5a51da14a65dc00b9fea
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (c) ONNX Project Contributors
+
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import sys
+
+from onnx.backend.test.case.base import Snippets
+from onnx.backend.test.case.utils import import_recursive
+
+
+def collect_snippets() -> dict[str, list[tuple[str, str]]]:
+    import_recursive(sys.modules[__name__])
+    return Snippets
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..58114e9662c42320584d2fc452c7b9ead5a4b9fa
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/__pycache__/__init__.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/__pycache__/base.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..542c3c9b74ab659ace70f375086bcc693625c922
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/__pycache__/base.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/__pycache__/test_case.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/__pycache__/test_case.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..373af0e1eb49b21be559557d60b1655e88866e11
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/__pycache__/test_case.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/__pycache__/utils.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..df2659a588f4217730d37e8ef4eef3023ef3698e
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/__pycache__/utils.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/base.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e1e49f80cc62fe4e57855959f61f256c0a430dc
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/base.py
@@ -0,0 +1,47 @@
+# Copyright (c) ONNX Project Contributors
+
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import inspect
+from collections import defaultdict
+from textwrap import dedent
+from typing import Any, ClassVar
+
+import numpy as np
+
+
+def process_snippet(op_name: str, name: str, export: Any) -> tuple[str, str]:
+    snippet_name = name[len("export_") :] or op_name.lower()
+    source_code = dedent(inspect.getsource(export))
+    # remove the function signature line
+    lines = source_code.splitlines()
+    assert lines[0] == "@staticmethod"
+    assert lines[1].startswith("def export")
+    return snippet_name, dedent("\n".join(lines[2:]))
+
+
+Snippets: dict[str, list[tuple[str, str]]] = defaultdict(list)
+
+
+class _Exporter(type):
+    exports: ClassVar[dict[str, list[tuple[str, str]]]] = defaultdict(list)
+
+    def __init__(
+        cls, name: str, bases: tuple[type[Any], ...], dct: dict[str, Any]
+    ) -> None:
+        for k, v in dct.items():
+            if k.startswith("export"):
+                if not isinstance(v, staticmethod):
+                    raise ValueError("Only staticmethods could be named as export.*")
+                export = getattr(cls, k)
+                Snippets[name].append(process_snippet(name, k, export))
+                # export functions should call expect and so populate
+                # TestCases
+                np.random.seed(seed=0)
+                export()
+        super().__init__(name, bases, dct)
+
+
+class Base(metaclass=_Exporter):
+    pass
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__init__.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..164654787dc9c715f4366ade2d8f7d4be8196e9f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__init__.py
@@ -0,0 +1,82 @@
+# Copyright (c) ONNX Project Contributors
+
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import sys
+from typing import TYPE_CHECKING
+
+from onnx.backend.test.case.test_case import TestCase
+from onnx.backend.test.case.utils import import_recursive
+
+if TYPE_CHECKING:
+    from collections.abc import Sequence
+
+    import numpy as np
+
+    from onnx import ModelProto
+
+_SimpleModelTestCases = []
+
+
+def expect(
+    model: ModelProto,
+    inputs: Sequence[np.ndarray],
+    outputs: Sequence[np.ndarray],
+    name: str | None = None,
+) -> None:
+    name = name or model.graph.name
+    _SimpleModelTestCases.append(
+        TestCase(
+            name=name,
+            model_name=model.graph.name,
+            url=None,
+            model_dir=None,
+            model=model,
+            data_sets=[(inputs, outputs)],
+            kind="simple",
+            rtol=1e-3,
+            atol=1e-7,
+        )
+    )
+
+
+# BASE_URL = "https://download.onnxruntime.ai/onnx/models"
+BASE_URL = "onnx/backend/test/data/light/light_%s.onnx"
+
+
+def collect_testcases() -> list[TestCase]:
+    """Collect model test cases defined in python/numpy code."""
+    real_model_testcases = []
+
+    model_tests = [
+        ("test_bvlc_alexnet", "bvlc_alexnet", 1e-3, 1e-7),
+        ("test_densenet121", "densenet121", 2e-3, 1e-7),
+        ("test_inception_v1", "inception_v1", 1e-3, 1e-7),
+        ("test_inception_v2", "inception_v2", 1e-3, 1e-7),
+        ("test_resnet50", "resnet50", 1e-3, 1e-7),
+        ("test_shufflenet", "shufflenet", 1e-3, 1e-7),
+        ("test_squeezenet", "squeezenet", 1e-3, 1e-7),
+        ("test_vgg19", "vgg19", 1e-3, 1e-7),
+        ("test_zfnet512", "zfnet512", 1e-3, 1e-7),
+    ]
+
+    for test_name, model_name, rtol, atol in model_tests:
+        url = BASE_URL % model_name
+        real_model_testcases.append(
+            TestCase(
+                name=test_name,
+                model_name=model_name,
+                url=url,
+                model_dir=None,
+                model=None,
+                data_sets=None,
+                kind="real",
+                rtol=rtol,
+                atol=atol,
+            )
+        )
+
+    import_recursive(sys.modules[__name__])
+
+    return real_model_testcases + _SimpleModelTestCases
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a2e041e2ec931e73a3fd02349a7921c88156e83
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/__init__.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/expand.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/expand.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..328f9da95f6d8963707f46261dcde4344dcd3f23
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/expand.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/gradient.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/gradient.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..50a561b7d21d9b0fb83ad5e53e635cae29be2a59
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/gradient.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/sequence.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/sequence.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd100f78d1d45360e64b5442328106982661463a
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/sequence.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/shrink.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/shrink.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..82746b750429ee3b8f3c6758dae6f379a060ac2a
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/shrink.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/sign.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/sign.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9b66d1e07889d3104586923729be20acebba9ce1
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/sign.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/single-relu.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/single-relu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9403225cecc2dafce8ec37de07c517d1b3c36e76
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/single-relu.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/stringnormalizer.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/stringnormalizer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2374dcf381d69d6a8bf01d566483ecb6d552b724
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/__pycache__/stringnormalizer.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/expand.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/expand.py
new file mode 100644
index 0000000000000000000000000000000000000000..38f7f47dad1e2f9b4f9880e3d9b0849536219b77
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/expand.py
@@ -0,0 +1,92 @@
+# Copyright (c) ONNX Project Contributors
+
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.model import expect
+
+if TYPE_CHECKING:
+    from collections.abc import Sequence
+
+
+class ExpandDynamicShape(Base):
+    @staticmethod
+    def export() -> None:
+        def make_graph(
+            node: onnx.helper.NodeProto,
+            input_shape: Sequence[int],
+            shape_shape: Sequence[int],
+            output_shape: Sequence[int],
+        ) -> onnx.helper.GraphProto:
+            graph = onnx.helper.make_graph(
+                nodes=[node],
+                name="Expand",
+                inputs=[
+                    onnx.helper.make_tensor_value_info(
+                        "X", onnx.TensorProto.FLOAT, input_shape
+                    ),
+                    onnx.helper.make_tensor_value_info(
+                        "shape", onnx.TensorProto.INT64, shape_shape
+                    ),
+                ],
+                outputs=[
+                    onnx.helper.make_tensor_value_info(
+                        "Y", onnx.TensorProto.FLOAT, output_shape
+                    )
+                ],
+            )
+            return graph
+
+        node = onnx.helper.make_node("Expand", ["X", "shape"], ["Y"], name="test")
+        input_shape = [1, 3, 1]
+        x = np.ones(input_shape, dtype=np.float32)
+
+        # 1st testcase
+        shape = np.array([3, 1], dtype=np.int64)
+        y = x * np.ones(shape, dtype=np.float32)
+        graph = make_graph(node, input_shape, shape.shape, y.shape)
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 9)],
+        )
+        expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model1")
+
+        # 2nd testcase
+        shape = np.array([1, 3], dtype=np.int64)
+        y = x * np.ones(shape, dtype=np.float32)
+        graph = make_graph(node, input_shape, shape.shape, y.shape)
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 9)],
+        )
+        expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model2")
+
+        # 3rd testcase
+        shape = np.array([3, 1, 3], dtype=np.int64)
+        y = x * np.ones(shape, dtype=np.float32)
+        graph = make_graph(node, input_shape, shape.shape, y.shape)
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 9)],
+        )
+        expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model3")
+
+        # 4th testcase
+        shape = np.array([3, 3, 1, 3], dtype=np.int64)
+        y = x * np.ones(shape, dtype=np.float32)
+        graph = make_graph(node, input_shape, shape.shape, y.shape)
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 9)],
+        )
+        expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model4")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/gradient.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/gradient.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8e90d63f5c68975017e03c76e04a62ea77b2b73
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/gradient.py
@@ -0,0 +1,110 @@
+# Copyright (c) ONNX Project Contributors
+
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.model import expect
+from onnx.defs import AI_ONNX_PREVIEW_TRAINING_DOMAIN, ONNX_DOMAIN
+
+
+class Gradient(Base):
+    @staticmethod
+    def export_gradient_scalar_add() -> None:
+        add_node = onnx.helper.make_node("Add", ["a", "b"], ["c"], name="my_add")
+        gradient_node = onnx.helper.make_node(
+            "Gradient",
+            ["a", "b"],
+            ["dc_da", "dc_db"],
+            name="my_gradient",
+            domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,
+            xs=["a", "b"],
+            y="c",
+        )
+
+        a = np.array(1.0).astype(np.float32)
+        b = np.array(2.0).astype(np.float32)
+        c = a + b
+        # dc / da = d(a+b) / da = 1
+        dc_da = np.array(1).astype(np.float32)
+        # db / db = d(a+b) / db = 1
+        dc_db = np.array(1).astype(np.float32)
+
+        graph = onnx.helper.make_graph(
+            nodes=[add_node, gradient_node],
+            name="GradientOfAdd",
+            inputs=[
+                onnx.helper.make_tensor_value_info("a", onnx.TensorProto.FLOAT, []),
+                onnx.helper.make_tensor_value_info("b", onnx.TensorProto.FLOAT, []),
+            ],
+            outputs=[
+                onnx.helper.make_tensor_value_info("c", onnx.TensorProto.FLOAT, []),
+                onnx.helper.make_tensor_value_info("dc_da", onnx.TensorProto.FLOAT, []),
+                onnx.helper.make_tensor_value_info("dc_db", onnx.TensorProto.FLOAT, []),
+            ],
+        )
+        opsets = [
+            onnx.helper.make_operatorsetid(ONNX_DOMAIN, 12),
+            onnx.helper.make_operatorsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1),
+        ]
+        model = onnx.helper.make_model_gen_version(
+            graph, producer_name="backend-test", opset_imports=opsets
+        )
+        expect(
+            model, inputs=[a, b], outputs=[c, dc_da, dc_db], name="test_gradient_of_add"
+        )
+
+    @staticmethod
+    def export_gradient_scalar_add_and_mul() -> None:
+        add_node = onnx.helper.make_node("Add", ["a", "b"], ["c"], name="my_add")
+        mul_node = onnx.helper.make_node("Mul", ["c", "a"], ["d"], name="my_mul")
+        gradient_node = onnx.helper.make_node(
+            "Gradient",
+            ["a", "b"],
+            ["dd_da", "dd_db"],
+            name="my_gradient",
+            domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,
+            xs=["a", "b"],
+            y="d",
+        )
+
+        a = np.array(1.0).astype(np.float32)
+        b = np.array(2.0).astype(np.float32)
+        c = a + b
+        # d = a * c = a * (a + b)
+        d = a * c
+        # dd / da = d(a*a+a*b) / da = 2 * a + b
+        dd_da = (2 * a + b).astype(np.float32)
+        # dd / db = d(a*a+a*b) / db = a
+        dd_db = a
+
+        graph = onnx.helper.make_graph(
+            nodes=[add_node, mul_node, gradient_node],
+            name="GradientOfTwoOperators",
+            inputs=[
+                onnx.helper.make_tensor_value_info("a", onnx.TensorProto.FLOAT, []),
+                onnx.helper.make_tensor_value_info("b", onnx.TensorProto.FLOAT, []),
+            ],
+            outputs=[
+                onnx.helper.make_tensor_value_info("d", onnx.TensorProto.FLOAT, []),
+                onnx.helper.make_tensor_value_info("dd_da", onnx.TensorProto.FLOAT, []),
+                onnx.helper.make_tensor_value_info("dd_db", onnx.TensorProto.FLOAT, []),
+            ],
+        )
+
+        opsets = [
+            onnx.helper.make_operatorsetid(ONNX_DOMAIN, 12),
+            onnx.helper.make_operatorsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1),
+        ]
+        model = onnx.helper.make_model_gen_version(
+            graph, producer_name="backend-test", opset_imports=opsets
+        )
+        expect(
+            model,
+            inputs=[a, b],
+            outputs=[d, dd_da, dd_db],
+            name="test_gradient_of_add_and_mul",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/sequence.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/sequence.py
new file mode 100644
index 0000000000000000000000000000000000000000..10d773fd7ceb47964e5d7cae9400793a897a3151
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/sequence.py
@@ -0,0 +1,457 @@
+# Copyright (c) ONNX Project Contributors
+
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import annotations
+
+import typing
+
+import numpy as np
+
+import onnx
+from onnx import TensorProto
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.model import expect
+
+
+def SequenceEmptyImpl() -> list[np.ndarray | None]:
+    return []
+
+
+def SequenceConstructImpl(*tensors: np.ndarray) -> list[np.ndarray]:
+    return list(tensors)
+
+
+def SequenceInsertImpl(
+    sequence: list[np.ndarray], tensor: np.ndarray, position: int | None = None
+) -> list[np.ndarray]:
+    if position is None:
+        position = len(sequence)
+    sequence.insert(position, tensor)
+    return sequence
+
+
+def SequenceAtImpl(sequence: list[np.ndarray], position: int) -> np.ndarray:
+    return sequence[position]
+
+
+def SequenceEraseImpl(
+    sequence: list[np.ndarray], position: int | None = None
+) -> list[np.ndarray | None]:
+    if position is None:
+        position = -1
+    del sequence[position]
+    return sequence
+
+
+def SequenceLengthImpl(sequence: list[np.ndarray]) -> np.int64:
+    return np.int64(len(sequence))
+
+
+def SplitToSequenceImpl(
+    tensor: np.ndarray,
+    split: int | list[int] | None = None,
+    axis: int = 0,
+    keepdims: int = 1,
+) -> list[np.ndarray]:
+    dim_size = tensor.shape[axis]
+    if split is None:
+        split = 1
+        split_indices = [
+            i * split + 1 for i in range(dim_size) if i * split + 1 < dim_size
+        ]
+        if not keepdims:
+            results = np.array_split(tensor, split_indices, axis)
+            return [np.squeeze(res, axis) for res in results]
+    if np.isscalar(split):
+        split_indices = [
+            i * split + 1 for i in range(dim_size) if i * split + 1 < dim_size
+        ]
+    else:
+        split_indices = np.cumsum(split) + 1
+    return np.array_split(tensor, split_indices, axis)
+
+
+def ConcatFromSequenceImpl(
+    sequence: list[np.ndarray], axis: int, new_axis: int | None = 0
+) -> np.ndarray:
+    if not new_axis:
+        return np.concatenate(sequence, axis)
+    return np.stack(sequence, axis)
+
+
+class Sequence(Base):
+    @staticmethod
+    def export() -> None:
+        def make_graph(
+            nodes: list[onnx.helper.NodeProto],
+            input_shapes: list[typing.Sequence[str | int] | None],
+            output_shapes: list[typing.Sequence[str | int] | None],
+            input_names: list[str],
+            output_names: list[str],
+            input_types: list[TensorProto.DataType],
+            output_types: list[TensorProto.DataType],
+            initializers: list[TensorProto] | None = None,
+        ) -> onnx.helper.GraphProto:
+            graph = onnx.helper.make_graph(
+                nodes=nodes,
+                name="Sequence",
+                inputs=[
+                    onnx.helper.make_tensor_value_info(name, input_type, input_shape)
+                    for name, input_type, input_shape in zip(
+                        input_names, input_types, input_shapes
+                    )
+                ],
+                outputs=[
+                    onnx.helper.make_tensor_value_info(name, output_type, output_shape)
+                    for name, output_type, output_shape in zip(
+                        output_names, output_types, output_shapes
+                    )
+                ],
+                initializer=initializers,
+            )
+            return graph
+
+        # 1st testcase - insert and at.
+        # 1. SequenceEmpty:         -> []
+        # 2. SequenceInsert(x):     -> [x]
+        # 3. SequenceInsert(y):     -> [x, y]
+        # 4. SequenceInsert(z, 1):  -> [x, z, y]
+        # 5. SequenceAt(2):         -> y
+        seq_empty_node = onnx.helper.make_node("SequenceEmpty", [], ["Seq_empty"])
+        seq_insert_node = onnx.helper.make_node(
+            "SequenceInsert", ["Seq_empty", "X"], ["Seq_1"]
+        )
+        seq_insert_node2 = onnx.helper.make_node(
+            "SequenceInsert", ["Seq_1", "Y"], ["Seq_2"]
+        )
+        seq_insert_node3 = onnx.helper.make_node(
+            "SequenceInsert", ["Seq_2", "Z", "pos"], ["Seq_3"]
+        )
+        seq_at_node = onnx.helper.make_node("SequenceAt", ["Seq_3", "pos_at"], ["out"])
+
+        x_shape = [2, 3, 4]
+        y_shape = [1, 3, 4]
+        z_shape = [3, 3, 4]
+        out_shape = [None, 3, 4]
+
+        x = np.ones(x_shape, dtype=np.float32)
+        y = np.zeros(y_shape, dtype=np.float32)
+        z = np.ones(z_shape, dtype=np.float32) * 2
+        pos_val = 1
+        pos_at_val = 2
+
+        out = SequenceEmptyImpl()
+        out = SequenceInsertImpl(out, x)
+        out = SequenceInsertImpl(out, y)
+        out = SequenceInsertImpl(out, z, pos_val)
+        out = SequenceAtImpl(out, pos_at_val)
+        assert np.array_equal(out, y)
+
+        pos = onnx.helper.make_tensor("pos", TensorProto.INT64, (), (pos_val,))
+        pos_at = onnx.helper.make_tensor("pos_at", TensorProto.INT64, (), (pos_at_val,))
+
+        graph = make_graph(
+            [
+                seq_empty_node,
+                seq_insert_node,
+                seq_insert_node2,
+                seq_insert_node3,
+                seq_at_node,
+            ],
+            [x_shape, y_shape, z_shape, [], []],
+            [out_shape],
+            ["X", "Y", "Z", "pos", "pos_at"],
+            ["out"],
+            [onnx.TensorProto.FLOAT] * 3 + [onnx.TensorProto.INT64] * 2,
+            [onnx.TensorProto.FLOAT],
+            [pos, pos_at],
+        )
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 12)],
+        )
+        expect(model, inputs=[x, y, z], outputs=[out], name="test_sequence_model1")
+
+        # 2nd testcase - erase and at.
+        # 1. SequenceConstruct(x, y, z):    -> [x, y, z]
+        # 2. SequenceErase(1):              -> [x, z]
+        # 3. SequenceAt(1):                 -> z
+        seq_construct_node = onnx.helper.make_node(
+            "SequenceConstruct", ["X", "Y", "Z"], ["seq_1"]
+        )
+        seq_erase_node = onnx.helper.make_node(
+            "SequenceErase", ["seq_1", "pos_erase"], ["seq_2"]
+        )
+        seq_at_node = onnx.helper.make_node("SequenceAt", ["seq_2", "pos_at"], ["out"])
+
+        tensor_shape = [2, 3, 4]
+
+        x = np.ones(tensor_shape, dtype=np.float32)
+        y = np.zeros(tensor_shape, dtype=np.float32)
+        z = np.ones(tensor_shape, dtype=np.float32) * 2
+        pos_erase_val = 1
+        pos_at_val = 1
+
+        out = SequenceConstructImpl(x, y, z)
+        out = SequenceEraseImpl(out, pos_erase_val)
+        out = SequenceAtImpl(out, pos_at_val)
+        assert np.array_equal(out, z)
+
+        pos_erase = onnx.helper.make_tensor(
+            "pos_erase", TensorProto.INT64, (), (pos_erase_val,)
+        )
+        pos_at = onnx.helper.make_tensor("pos_at", TensorProto.INT64, (), (pos_at_val,))
+
+        graph = make_graph(
+            [seq_construct_node, seq_erase_node, seq_at_node],
+            [tensor_shape, tensor_shape, tensor_shape, [], []],
+            [tensor_shape],
+            ["X", "Y", "Z", "pos_erase", "pos_at"],
+            ["out"],
+            [onnx.TensorProto.FLOAT] * 3 + [onnx.TensorProto.INT64] * 2,
+            [onnx.TensorProto.FLOAT],
+            [pos_erase, pos_at],
+        )
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 12)],
+        )
+        expect(model, inputs=[x, y, z], outputs=[out], name="test_sequence_model2")
+
+        # 3rd testcase - erase, insert and at, with negative index value.
+        # 1. SequenceConstruct(x, y, z):    -> [x, y, z]
+        # 2. SequenceErase(-3):             -> [y, z]
+        # 3. SequenceInsert(x, -1):         -> [y, x, z]
+        # 4. SequenceAt(-1):                -> z
+        seq_construct_node = onnx.helper.make_node(
+            "SequenceConstruct", ["X", "Y", "Z"], ["seq_1"]
+        )
+        seq_erase_node = onnx.helper.make_node(
+            "SequenceErase", ["seq_1", "pos_erase"], ["seq_2"]
+        )
+        seq_insert_node = onnx.helper.make_node(
+            "SequenceInsert", ["seq_2", "X", "pos_insert"], ["seq_3"]
+        )
+        seq_at_node = onnx.helper.make_node("SequenceAt", ["seq_3", "pos_at"], ["out"])
+
+        tensor_shape = [2, 3, 4]
+
+        x = np.ones(tensor_shape, dtype=np.float32)
+        y = np.zeros(tensor_shape, dtype=np.float32)
+        z = np.ones(tensor_shape, dtype=np.float32) * 2
+        pos_erase_val = -3
+        pos_insert_val = -1
+        pos_at_val = -1
+        out = SequenceConstructImpl(x, y, z)
+        out = SequenceEraseImpl(out, pos_erase_val)
+        out = SequenceInsertImpl(out, x, pos_insert_val)
+        out = SequenceAtImpl(out, pos_at_val)
+        assert np.array_equal(out, z)
+
+        pos_erase = onnx.helper.make_tensor(
+            "pos_erase", TensorProto.INT64, (), (pos_erase_val,)
+        )
+        pos_insert = onnx.helper.make_tensor(
+            "pos_insert", TensorProto.INT64, (), (pos_insert_val,)
+        )
+        pos_at = onnx.helper.make_tensor("pos_at", TensorProto.INT64, (), (pos_at_val,))
+
+        graph = make_graph(
+            [seq_construct_node, seq_erase_node, seq_insert_node, seq_at_node],
+            [tensor_shape, tensor_shape, tensor_shape, [], [], []],
+            [tensor_shape],
+            ["X", "Y", "Z", "pos_erase", "pos_insert", "pos_at"],
+            ["out"],
+            [onnx.TensorProto.FLOAT] * 3 + [onnx.TensorProto.INT64] * 3,
+            [onnx.TensorProto.FLOAT],
+            [pos_erase, pos_insert, pos_at],
+        )
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 12)],
+        )
+        expect(model, inputs=[x, y, z], outputs=[out], name="test_sequence_model3")
+
+        # 4th testcase - concat
+        seq_construct_node = onnx.helper.make_node(
+            "SequenceConstruct", ["X", "Y", "Z"], ["seq_1"]
+        )
+        seq_concat_node = onnx.helper.make_node(
+            "ConcatFromSequence", ["seq_1"], ["out"], axis=1
+        )
+
+        tensor_shape = [2, 3, 4]
+        concat_out_shape = [2, None, 4]
+
+        x = np.ones(tensor_shape, dtype=np.float32)
+        y = np.zeros(tensor_shape, dtype=np.float32)
+        z = np.ones(tensor_shape, dtype=np.float32) * 2
+        out = SequenceConstructImpl(x, y, z)
+        concat_out = ConcatFromSequenceImpl(out, 1)
+
+        graph = make_graph(
+            [seq_construct_node, seq_concat_node],
+            [tensor_shape] * 3,
+            [concat_out_shape],
+            ["X", "Y", "Z"],
+            ["out"],
+            [onnx.TensorProto.FLOAT] * 3,
+            [onnx.TensorProto.FLOAT],
+        )
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 12)],
+        )
+        expect(
+            model, inputs=[x, y, z], outputs=[concat_out], name="test_sequence_model4"
+        )
+
+        # 5th testcase - concat with new_axis = 1
+        seq_construct_node = onnx.helper.make_node(
+            "SequenceConstruct", ["X", "Y", "Z"], ["seq_1"]
+        )
+        seq_concat_node = onnx.helper.make_node(
+            "ConcatFromSequence", ["seq_1"], ["out"], axis=-1, new_axis=1
+        )
+
+        tensor_shape = [2, 3, 4]
+        concat_out_shape = [2, 3, 4, 3]
+
+        x = np.ones(tensor_shape, dtype=np.float32)
+        y = np.zeros(tensor_shape, dtype=np.float32)
+        z = np.ones(tensor_shape, dtype=np.float32) * 2
+        out = SequenceConstructImpl(x, y, z)
+        concat_out = ConcatFromSequenceImpl(out, -1, 1)
+
+        graph = make_graph(
+            [seq_construct_node, seq_concat_node],
+            [tensor_shape] * 3,
+            [concat_out_shape],
+            ["X", "Y", "Z"],
+            ["out"],
+            [onnx.TensorProto.FLOAT] * 3,
+            [onnx.TensorProto.FLOAT],
+        )
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 12)],
+        )
+        expect(
+            model, inputs=[x, y, z], outputs=[concat_out], name="test_sequence_model5"
+        )
+
+        # 6th testcase - split and len
+        seq_split_node = onnx.helper.make_node(
+            "SplitToSequence", ["X"], ["seq_1"], axis=-1
+        )
+        seq_len_node = onnx.helper.make_node("SequenceLength", ["seq_1"], ["len"])
+
+        tensor_shape = [2, 3, 4]
+        len_shape = []
+
+        x = np.ones(tensor_shape, dtype=np.float32)
+        out = SplitToSequenceImpl(x, axis=-1)
+        out = SequenceLengthImpl(out)
+        assert np.array_equal(out, np.int64(4))
+
+        graph = onnx.helper.make_graph(
+            nodes=[seq_split_node, seq_len_node],
+            name="Sequence",
+            inputs=[
+                onnx.helper.make_tensor_value_info(
+                    "X", onnx.TensorProto.FLOAT, tensor_shape
+                )
+            ],
+            outputs=[
+                onnx.helper.make_tensor_value_info(
+                    "len", onnx.TensorProto.INT64, len_shape
+                )
+            ],
+        )
+
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 12)],
+        )
+        expect(model, inputs=[x], outputs=[out], name="test_sequence_model6")
+
+        # 7th testcase - split with keepdims=0, and SequenceAt
+        seq_split_node = onnx.helper.make_node(
+            "SplitToSequence", ["X"], ["seq_1"], axis=0, keepdims=0
+        )
+        seq_at_node = onnx.helper.make_node("SequenceAt", ["seq_1", "pos_at"], ["out"])
+
+        tensor_shape = [2, 3, 4]
+        out_shape = [3, 4]
+
+        x = np.random.rand(*tensor_shape)
+        pos_at_val = 1
+        out = SplitToSequenceImpl(x, axis=0, keepdims=0)
+        out = SequenceAtImpl(out, pos_at_val)
+        assert np.array_equal(out, x[pos_at_val])
+
+        pos_at = onnx.helper.make_tensor("pos_at", TensorProto.INT64, (), (pos_at_val,))
+
+        graph = make_graph(
+            [seq_split_node, seq_at_node],
+            [tensor_shape, []],
+            [out_shape],
+            ["X", "pos_at"],
+            ["out"],
+            [onnx.TensorProto.DOUBLE, onnx.TensorProto.INT64],
+            [onnx.TensorProto.DOUBLE],
+            [pos_at],
+        )
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 12)],
+        )
+        expect(model, inputs=[x], outputs=[out], name="test_sequence_model7")
+
+        # 8th testcase - split zero length
+        seq_split_node = onnx.helper.make_node(
+            "SplitToSequence", ["X", "Splits"], ["seq_1"]
+        )
+        seq_len_node = onnx.helper.make_node("SequenceLength", ["seq_1"], ["len"])
+
+        tensor_shape = ["n"]
+        splits_shape = [3]
+
+        x = np.array([]).astype(np.float32)
+        splits = np.array([0, 0, 0]).astype(np.int64)
+        out_len = np.int64(3)
+
+        graph = onnx.helper.make_graph(
+            nodes=[seq_split_node, seq_len_node],
+            name="Sequence",
+            inputs=[
+                onnx.helper.make_tensor_value_info(
+                    "X", onnx.TensorProto.FLOAT, tensor_shape
+                ),
+                onnx.helper.make_tensor_value_info(
+                    "Splits", onnx.TensorProto.INT64, splits_shape
+                ),
+            ],
+            outputs=[
+                onnx.helper.make_tensor_value_info(
+                    "len", onnx.TensorProto.INT64, len_shape
+                )
+            ],
+        )
+
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 12)],
+        )
+        expect(
+            model, inputs=[x, splits], outputs=[out_len], name="test_sequence_model8"
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/shrink.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/shrink.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6e4b3b3c9db76e1789af64b8cd661e9cf38e433
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/shrink.py
@@ -0,0 +1,42 @@
+# Copyright (c) ONNX Project Contributors
+
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.model import expect
+
+
+class ShrinkTest(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Shrink",
+            ["x"],
+            ["y"],
+            lambd=1.5,
+            bias=1.5,
+        )
+        graph = onnx.helper.make_graph(
+            nodes=[node],
+            name="Shrink",
+            inputs=[
+                onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [5])
+            ],
+            outputs=[
+                onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [5])
+            ],
+        )
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 10)],
+        )
+
+        x = np.array([-2.0, -1.0, 0.0, 1.0, 2.0], dtype=np.float32)
+        y = np.array([-0.5, 0.0, 0.0, 0.0, 0.5], dtype=np.float32)
+
+        expect(model, inputs=[x], outputs=[y], name="test_shrink")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/sign.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/sign.py
new file mode 100644
index 0000000000000000000000000000000000000000..38e2c112c3225b906d03135e1ce255b711118f98
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/sign.py
@@ -0,0 +1,36 @@
+# Copyright (c) ONNX Project Contributors
+
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.model import expect
+
+
+class SingleSign(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node("Sign", ["x"], ["y"], name="test")
+
+        x = np.array([-1.0, 4.5, -4.5, 3.1, 0.0, 2.4, -5.5]).astype(np.float32)
+        y = np.array([-1.0, 1.0, -1.0, 1.0, 0.0, 1.0, -1.0]).astype(np.float32)
+
+        graph = onnx.helper.make_graph(
+            nodes=[node],
+            name="SingleSign",
+            inputs=[
+                onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [7])
+            ],
+            outputs=[
+                onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [7])
+            ],
+        )
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 9)],
+        )
+        expect(model, inputs=[x], outputs=[y], name="test_sign_model")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/single-relu.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/single-relu.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c7d605300c5b04840e310f56c039dba713a8845
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/single-relu.py
@@ -0,0 +1,36 @@
+# Copyright (c) ONNX Project Contributors
+
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.model import expect
+
+
+class SingleRelu(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node("Relu", ["x"], ["y"], name="test")
+        graph = onnx.helper.make_graph(
+            nodes=[node],
+            name="SingleRelu",
+            inputs=[
+                onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [1, 2])
+            ],
+            outputs=[
+                onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [1, 2])
+            ],
+        )
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 9)],
+        )
+
+        x = np.random.randn(1, 2).astype(np.float32)
+        y = np.maximum(x, 0)
+
+        expect(model, inputs=[x], outputs=[y], name="test_single_relu_model")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/stringnormalizer.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/stringnormalizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6e7af57bb12dd6918cd58c758462bfc5ad3a0e2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/model/stringnormalizer.py
@@ -0,0 +1,206 @@
+# Copyright (c) ONNX Project Contributors
+
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.model import expect
+
+if TYPE_CHECKING:
+    from collections.abc import Sequence
+
+
+class NormalizeStrings(Base):
+    @staticmethod
+    def export() -> None:
+        def make_graph(
+            node: onnx.helper.NodeProto,
+            input_shape: Sequence[int],
+            output_shape: Sequence[int],
+        ) -> onnx.helper.GraphProto:
+            graph = onnx.helper.make_graph(
+                nodes=[node],
+                name="StringNormalizer",
+                inputs=[
+                    onnx.helper.make_tensor_value_info(
+                        "x", onnx.TensorProto.STRING, input_shape
+                    )
+                ],
+                outputs=[
+                    onnx.helper.make_tensor_value_info(
+                        "y", onnx.TensorProto.STRING, output_shape
+                    )
+                ],
+            )
+            return graph
+
+        # 1st model_monday_casesensintive_nochangecase
+        stopwords = ["monday"]
+        node = onnx.helper.make_node(
+            "StringNormalizer",
+            inputs=["x"],
+            outputs=["y"],
+            is_case_sensitive=1,
+            stopwords=stopwords,
+        )
+
+        x = np.array(["monday", "tuesday", "wednesday", "thursday"]).astype(object)
+        y = np.array(["tuesday", "wednesday", "thursday"]).astype(object)
+
+        graph = make_graph(node, [4], [3])
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 10)],
+        )
+        expect(
+            model,
+            inputs=[x],
+            outputs=[y],
+            name="test_strnorm_model_monday_casesensintive_nochangecase",
+        )
+
+        # 2nd model_nostopwords_nochangecase
+        node = onnx.helper.make_node(
+            "StringNormalizer", inputs=["x"], outputs=["y"], is_case_sensitive=1
+        )
+
+        x = np.array(["monday", "tuesday"]).astype(object)
+        y = x
+
+        graph = make_graph(node, [2], [2])
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 10)],
+        )
+        expect(
+            model,
+            inputs=[x],
+            outputs=[y],
+            name="test_strnorm_model_nostopwords_nochangecase",
+        )
+
+        # 3rd model_monday_casesensintive_lower
+        stopwords = ["monday"]
+        node = onnx.helper.make_node(
+            "StringNormalizer",
+            inputs=["x"],
+            outputs=["y"],
+            case_change_action="LOWER",
+            is_case_sensitive=1,
+            stopwords=stopwords,
+        )
+
+        x = np.array(["monday", "tuesday", "wednesday", "thursday"]).astype(object)
+        y = np.array(["tuesday", "wednesday", "thursday"]).astype(object)
+
+        graph = make_graph(node, [4], [3])
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 10)],
+        )
+        expect(
+            model,
+            inputs=[x],
+            outputs=[y],
+            name="test_strnorm_model_monday_casesensintive_lower",
+        )
+
+        # 4 model_monday_casesensintive_upper
+        stopwords = ["monday"]
+        node = onnx.helper.make_node(
+            "StringNormalizer",
+            inputs=["x"],
+            outputs=["y"],
+            case_change_action="UPPER",
+            is_case_sensitive=1,
+            stopwords=stopwords,
+        )
+
+        x = np.array(["monday", "tuesday", "wednesday", "thursday"]).astype(object)
+        y = np.array(["TUESDAY", "WEDNESDAY", "THURSDAY"]).astype(object)
+
+        graph = make_graph(node, [4], [3])
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 10)],
+        )
+        expect(
+            model,
+            inputs=[x],
+            outputs=[y],
+            name="test_strnorm_model_monday_casesensintive_upper",
+        )
+
+        # 5 monday_insensintive_upper_twodim
+        stopwords = ["monday"]
+        node = onnx.helper.make_node(
+            "StringNormalizer",
+            inputs=["x"],
+            outputs=["y"],
+            case_change_action="UPPER",
+            stopwords=stopwords,
+        )
+
+        input_shape = [1, 6]
+        output_shape = [1, 4]
+        x = (
+            np.array(
+                ["Monday", "tuesday", "wednesday", "Monday", "tuesday", "wednesday"]
+            )
+            .astype(object)
+            .reshape(input_shape)
+        )
+        y = (
+            np.array(["TUESDAY", "WEDNESDAY", "TUESDAY", "WEDNESDAY"])
+            .astype(object)
+            .reshape(output_shape)
+        )
+
+        graph = make_graph(node, input_shape, output_shape)
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 10)],
+        )
+        expect(
+            model,
+            inputs=[x],
+            outputs=[y],
+            name="test_strnorm_model_monday_insensintive_upper_twodim",
+        )
+
+        # 6 monday_empty_output
+        stopwords = ["monday"]
+        node = onnx.helper.make_node(
+            "StringNormalizer",
+            inputs=["x"],
+            outputs=["y"],
+            case_change_action="UPPER",
+            is_case_sensitive=0,
+            stopwords=stopwords,
+        )
+
+        x = np.array(["monday", "monday"]).astype(object)
+        y = np.array([""]).astype(object)
+
+        graph = make_graph(node, [2], [1])
+        model = onnx.helper.make_model_gen_version(
+            graph,
+            producer_name="backend-test",
+            opset_imports=[onnx.helper.make_opsetid("", 10)],
+        )
+        expect(
+            model,
+            inputs=[x],
+            outputs=[y],
+            name="test_strnorm_model_monday_empty_output",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..af8427fbc9975cbab6b892ad0bae06e879bb3a63
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/__init__.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/abs.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/abs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..43f5c480f9c4906964d9d4afa8935d00db124a48
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/abs.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/acos.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/acos.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..804dffc88784d854dec7543194f4d900163a6a0d
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/acos.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/acosh.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/acosh.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab0806d6766bb0ae8c85ce253c7b72cf392d6f72
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/acosh.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/adagrad.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/adagrad.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8574a4347561be800d8a14d4bc860e10da81582d
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/adagrad.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/adam.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/adam.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..88961006d1d5a4f31ecd67688806fc0d460b7a8f
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/adam.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/add.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/add.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4b1d04fd31ce7091101754978237902a64245ef5
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/add.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/affinegrid.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/affinegrid.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..82006099ceaa7251077f605e5b9317d5654a6f96
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/affinegrid.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/and.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/and.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..93fe9cedeac7244364986e70d6fbe0313904a73c
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/and.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/argmax.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/argmax.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d3861657c30fb588747b7d3b940937655e0b5677
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/argmax.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/argmin.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/argmin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9cdde8e7f18e408b72a5dc5a60facfde8cbb7c4f
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/argmin.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/asin.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/asin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e45ab380e09facdb502e276a5e653cc4f389af53
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/asin.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/asinh.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/asinh.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e11b461a8ef697181dc97531309d41930d149e8
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/asinh.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/atan.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/atan.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..64b453ee2d8ef55914d86266b2b2ffa2b4767c0c
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/atan.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/atanh.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/atanh.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2f4b5bf4aa2b6bbb2020138bdce8f27f45ea21b6
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/atanh.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/attention.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/attention.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..55e03daef63478d4745e35c56815c29e46aada4b
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/attention.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/averagepool.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/averagepool.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9f24adff6c2ecabd46326ace13935d2ed9b3efc1
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/averagepool.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/batchnorm.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/batchnorm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..131af65faa3ff46170609bac07f48e3c9d2afff9
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/batchnorm.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bernoulli.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bernoulli.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ec23ac59460b62ffab4590e3224580d81c787a34
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bernoulli.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bitshift.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bitshift.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..15f0d002419e0dd28d0dc89415cc5877f672788a
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bitshift.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bitwiseand.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bitwiseand.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..10440033e835a201883f2e0b3ee97c58c65f26fe
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bitwiseand.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bitwisenot.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bitwisenot.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b9ff2ace49c8ee272ca2ee5b8659a4bb4b5cd6f2
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bitwisenot.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bitwiseor.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bitwiseor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d3e0facb61ed913a617ef87bb91bf16ceac7a66
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bitwiseor.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bitwisexor.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bitwisexor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e4d5f326c0dad8bb3d656b825c2b4a089ea7b18
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/bitwisexor.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/blackmanwindow.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/blackmanwindow.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..616c408a1335f6687a02843dea1a9b679c114af3
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/blackmanwindow.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/cast.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/cast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c7be7fb7561a1b68acdbe15482b08aa305166eb0
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/cast.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/globalmaxpool.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/globalmaxpool.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b6a7051dd8f5120495f5f9679646bd37c8b54d25
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/globalmaxpool.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/greater.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/greater.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e07835226aa55aa00e522f2cab52674b619bdd6c
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/greater.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/greater_equal.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/greater_equal.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7609d52cff992c179493f91a2db75017b0b5bc9a
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/greater_equal.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/gridsample.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/gridsample.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..41154ea34b95568c5d66a7c7bc3ef6bd5724e782
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/gridsample.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/groupnormalization.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/groupnormalization.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..55b581ce5b31398d074f4b3cab045f1f91eda287
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/groupnormalization.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/gru.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/gru.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d4bfb8f734c75e91a805179f9a2f5c8394fe1436
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/gru.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/hammingwindow.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/hammingwindow.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..500de0ec01a42e50a0143bd0313d63734d9e72a0
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/hammingwindow.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/hannwindow.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/hannwindow.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bee566ea66396272d2c6bf6471d8ee20e66f59a5
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/hannwindow.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/hardmax.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/hardmax.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cfc70c706e848f0917b06eaf92435c178c6b63bb
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/hardmax.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/hardsigmoid.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/hardsigmoid.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..58d1368b887b8652616451bc6bfe429bcd2a78bf
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/hardsigmoid.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/hardswish.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/hardswish.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..59a2919ea238088a094ef02a37327963e5b1f106
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/hardswish.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/identity.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/identity.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a2382c3f0b1f2853e359fc26f10e45b658143331
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/identity.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/if.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/if.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fe46e535be5f2aa9a3a9569855fdd20ae294bd04
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/if.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/image_decoder.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/image_decoder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e793b2ee9997a00b3cb08b77a25565dea380ee12
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/image_decoder.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/instancenorm.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/instancenorm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5229c8c2266769f49c6e712081deafaf3a9dd587
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/instancenorm.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/isinf.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/isinf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9b58466a6083e1f69b918e9143a095801a8eafd7
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/isinf.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/isnan.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/isnan.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dd0b050485cf5968d86f01d7776843d21e24f1fe
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/isnan.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/layernormalization.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/layernormalization.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7d03fe79d0ad1063c0b5d7d45305f4b4e9472f94
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/layernormalization.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/leakyrelu.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/leakyrelu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d439f678fcdbaf55ddc212ebb7e946f6ce5000b5
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/leakyrelu.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/less.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/less.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ade6ef4a7ad5845810e5cc072426eeb0b26c7845
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/less.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/less_equal.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/less_equal.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..93a9137870b94b80d8384942923f915ba2fbfe86
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/less_equal.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/log.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/log.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8399f9e29a7a8201114fdc61ca969bdead637f1b
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/log.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/logsoftmax.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/logsoftmax.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3d22d8615fc3f45498b26883e9ef8a94ca8c1837
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/logsoftmax.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/loop.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/loop.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..568de9f3f4789cf4ddafe08620244109f2bdc00a
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/loop.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/lpnormalization.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/lpnormalization.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..706bec5d317a633b825154392b556cfeb9d7b3f0
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/lpnormalization.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/lppool.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/lppool.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..94cdd80a8727200d31f32ab0e9f37817deaf25d4
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/lppool.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/lrn.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/lrn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c04531f66a9fb5b1e74196cd2b134415cbde8769
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/lrn.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/lstm.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/lstm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5a17221549dd6878527dcab27526ff1ee9e93577
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/lstm.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/matmul.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/matmul.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d9a8ced22ff519064e01e8680256ae7e306d3bb
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/matmul.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/matmulinteger.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/matmulinteger.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1b7bc8028660435d27a7ef8ce7fcc6bcd6337743
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/matmulinteger.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/max.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/max.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..397d0e44d5862ff6ca4f4f8fbacc804a17aca8de
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/max.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/maxpool.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/maxpool.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d1215d4f53c0c67fdc547a3398316d62e450adf5
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/maxpool.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/maxunpool.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/maxunpool.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c1b0d136d2c2524aa34e20b921d70e75b8d5aded
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/maxunpool.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reduce_log_sum.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reduce_log_sum.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..08a293732e4ae06853f3854bd7df4a8c2627b8c9
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reduce_log_sum.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reduce_log_sum_exp.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reduce_log_sum_exp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..deaa7def751c3a6c451793ae3087c6937077c4c0
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reduce_log_sum_exp.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reducemean.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reducemean.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e6f1ae0e50b9c39d6409eaa785644e165307ea3e
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reducemean.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reducemin.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reducemin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b31e30142c87ef8a7a1ee16e41679bbd5628a7fe
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reducemin.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reduceprod.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reduceprod.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8d22b462c6e69537575a74b7b2607b8d0702bb4b
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reduceprod.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reducesum.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reducesum.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..57f674511220103e7d1c4137ff2fb3d138fb7fbc
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reducesum.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reducesumsquare.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reducesumsquare.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a07ccfa6e805fc8ca42ead2f58b18592d9e10771
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reducesumsquare.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/regex_full_match.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/regex_full_match.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb41f3d026c0d838f133d723faf3d2b794aaaee2
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/regex_full_match.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/relu.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/relu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..465a6d9787cdf17e6a2aa2de264f1c3435afe1d5
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/relu.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reshape.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reshape.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..82d764fa2c7e3b5baf343898f0afcf047e14b329
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/reshape.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/rotaryembedding.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/rotaryembedding.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6d20169cce763cef10cbcc3c6aec39e3a8eb51e1
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/rotaryembedding.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/round.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/round.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ae4bef25b23076dbc7f72e69ba3cb7b51e475cf2
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/round.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/scan.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/scan.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a7e7a33c854c2bcac228f10d8fe20d162d7ef82f
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/scan.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/scatter.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/scatter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d69a226a147b41ef8958a2d473031933faca3526
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/scatter.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/scatterelements.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/scatterelements.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..892ffe6f37aaf2d10cb1fd97789d997dae734e42
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/scatterelements.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/scatternd.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/scatternd.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..88c668ef80c04654dbb3c0fe8d51bfcc0219194d
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/scatternd.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/selu.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/selu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4393be3510a7055bd3b9821d441ffd391f8422c5
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/selu.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sequence_map.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sequence_map.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..326300237ead393c17ef8e865630d52f4730d97c
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sequence_map.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sequenceinsert.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sequenceinsert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48c06fdd50b39755665deee85d8c1cce415fea2d
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sequenceinsert.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/shape.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/shape.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0996d1096273ed8cd064d31fe48bf450acb694b8
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/shape.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/shrink.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/shrink.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a9e5c1084d8274196e48dd63eb044bb1a91f7230
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/shrink.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sigmoid.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sigmoid.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b7b4f06519ece47bbbefafd35d12914679494bae
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sigmoid.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sign.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sign.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3c89ea12d853f6b4294796e3121cc805a224f5d0
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sign.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sin.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3d63517d6cdc021aad7c265a1660ffc2b637924
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sin.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sinh.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sinh.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8ec897aa7343d1e580790d4f3e52e4e3c286b4d
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sinh.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/size.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/size.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d1c3973737a05553812b26f7a0fc60876b72986a
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/size.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/slice.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/slice.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..92b1fbcb1c135bd06bfd29d9d9b5961828624b96
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/slice.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/softmax.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/softmax.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..85a31b02435be64d33766dcdf331ceddb8eae9c0
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/softmax.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/softmaxcrossentropy.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/softmaxcrossentropy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d44d98e43f9961eb4672c922554843dd454c0a70
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/softmaxcrossentropy.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/softplus.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/softplus.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d5ff0333f0106566c2a456402c3d12d7db44d062
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/softplus.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/softsign.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/softsign.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..137904f7753ce7a78b1632fb0c4def18ff3d9371
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/softsign.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/spacetodepth.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/spacetodepth.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..50fc334e2cef2cb97ff296bc8110412668aa57e6
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/spacetodepth.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/split.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/split.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8c2c33124d14eecf51e556706918f3bbbd47ae7d
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/split.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/splittosequence.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/splittosequence.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..de66dc681c873f10171d3f856b5e1847fce55ee3
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/splittosequence.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sqrt.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sqrt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2392dc26a622781235deb55f1c9218c63331d8fc
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sqrt.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/squeeze.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/squeeze.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a89e40fef3af71c31fe1c47938cdef5abda12251
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/squeeze.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/stft.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/stft.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48ee8c65e9ccc614dd1b6d8e8386e1b8f706e671
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/stft.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/string_concat.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/string_concat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..72675526a2ad096906b25991526fe750abc9a72b
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/string_concat.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/string_split.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/string_split.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6f9310f8a2d903853db0e2ecba527b5c208950fa
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/string_split.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/stringnormalizer.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/stringnormalizer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6db12f5bbd56db96276c45abd4a9b1dbf95dc56f
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/stringnormalizer.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sub.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sub.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..240711b69c76e4d07f28e1be17e24314cc958598
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sub.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sum.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sum.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9b31e69383b11357953574b14ba0718afc319027
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/sum.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/swish.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/swish.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2fd4194b08789266795d72d8eef92d382144aab0
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/swish.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/tan.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/tan.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d4841ef2ea9ff708c19687941d3cc54de09db952
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/tan.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/tanh.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/tanh.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..889a8c49548ce1a7798b5a661e19a417833a4f60
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/tanh.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/tensorscatter.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/tensorscatter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d107bfbf7d885a988172699af776d214c781dc33
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/tensorscatter.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/tfidfvectorizer.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/tfidfvectorizer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4df504f80e23cb90410a80e0b348b53f175c97f6
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/tfidfvectorizer.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/thresholdedrelu.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/thresholdedrelu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2f2e4e767b227c273f0d51db30fc1c3ccc3007c0
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/thresholdedrelu.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/tile.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/tile.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..226d6d0fcb5828f64f9f1fb753b19f19c258a99f
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/tile.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/topk.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/topk.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f108cf0758f675ad7a682f6cc33982e2574a5203
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/topk.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/transpose.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/transpose.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e90c19d7d4637623edeb9df6121b0382bc286485
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/transpose.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/trilu.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/trilu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c967508425a920ea40609dc4de2b8d4a9cf694e
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/trilu.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/unique.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/unique.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a8803900e0a0d84de6c0286ab92aa853c8789115
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/unique.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/unsqueeze.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/unsqueeze.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9eb1fcc3d57c7f2766d9e6cc3c6799c6be91763d
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/unsqueeze.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/upsample.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/upsample.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f5b50eb3f9637de6453540de064e0055cb3e3ffe
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/upsample.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/where.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/where.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..307ecc6c8bb43a533f00aab70d4cb5a74cfe6be2
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/where.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/xor.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/xor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c3f367a267a8d871d148a693a54e7b945097fb9
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/__pycache__/xor.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/abs.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/abs.py
new file mode 100644
index 0000000000000000000000000000000000000000..23776e82671583fd2594397794456114ccd87405
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/abs.py
@@ -0,0 +1,24 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Abs(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Abs",
+            inputs=["x"],
+            outputs=["y"],
+        )
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = np.abs(x)
+
+        expect(node, inputs=[x], outputs=[y], name="test_abs")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/acos.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/acos.py
new file mode 100644
index 0000000000000000000000000000000000000000..79dbe7125936918c3217ab967ca1089f9ada654e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/acos.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Acos(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Acos",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.array([-0.5, 0, 0.5]).astype(np.float32)
+        y = np.arccos(x)
+        expect(node, inputs=[x], outputs=[y], name="test_acos_example")
+
+        x = np.random.rand(3, 4, 5).astype(np.float32)
+        y = np.arccos(x)
+        expect(node, inputs=[x], outputs=[y], name="test_acos")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/acosh.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/acosh.py
new file mode 100644
index 0000000000000000000000000000000000000000..c67c3044ab984d92837f9a4cc6bcaf34631a7469
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/acosh.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Acosh(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Acosh",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.array([10, np.e, 1]).astype(np.float32)
+        y = np.arccosh(x)  # expected output [2.99322295,  1.65745449,  0.]
+        expect(node, inputs=[x], outputs=[y], name="test_acosh_example")
+
+        x = np.random.uniform(1.0, 10.0, (3, 4, 5)).astype(np.float32)
+        y = np.arccosh(x)
+        expect(node, inputs=[x], outputs=[y], name="test_acosh")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/adagrad.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/adagrad.py
new file mode 100644
index 0000000000000000000000000000000000000000..fed01150f09ee711b7d5edc472566ec08c21f05f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/adagrad.py
@@ -0,0 +1,116 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.defs import AI_ONNX_PREVIEW_TRAINING_DOMAIN
+
+
+def apply_adagrad(r, t, x, g, h, norm_coefficient, epsilon, decay_factor):
+    # Compute adjusted learning-rate.
+    r_ = r / (1 + t * decay_factor)
+    # Add gradient of regularization term.
+    g_regularized = norm_coefficient * x + g
+    # Update squared accumulated gradient.
+    h_new = h + g_regularized * g_regularized
+    # Compute ADAGRAD's gradient scaling factors
+    h_sqrt = np.sqrt(h_new) + epsilon
+    # Apply ADAGRAD update rule.
+    x_new = x - r_ * g_regularized / h_sqrt
+    return (x_new.astype(x.dtype), h_new.astype(h.dtype))
+
+
+class Adagrad(Base):
+    @staticmethod
+    def export_adagrad() -> None:
+        # Define operator attributes.
+        norm_coefficient = 0.001
+        epsilon = 1e-5
+        decay_factor = 0.1
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "Adagrad",
+            inputs=["R", "T", "X", "G", "H"],
+            outputs=["X_new", "H_new"],
+            norm_coefficient=norm_coefficient,
+            epsilon=epsilon,
+            decay_factor=decay_factor,
+            domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,
+        )
+
+        # Define operator inputs.
+        r = np.array(0.1, dtype=np.float32)  # scalar
+        t = np.array(0, dtype=np.int64)  # scalar
+        x = np.array([1.0], dtype=np.float32)
+        g = np.array([-1.0], dtype=np.float32)
+        h = np.array([2.0], dtype=np.float32)
+
+        # Compute expected outputs of Adagrad.
+        x_new, h_new = apply_adagrad(
+            r, t, x, g, h, norm_coefficient, epsilon, decay_factor
+        )
+
+        # Check results.
+        expect(
+            node,
+            inputs=[r, t, x, g, h],
+            outputs=[x_new, h_new],
+            name="test_adagrad",
+            opset_imports=[
+                onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)
+            ],
+        )
+
+    @staticmethod
+    def export_adagrad_multiple() -> None:
+        # Define operator attributes.
+        norm_coefficient = 0.001
+        epsilon = 1e-5
+        decay_factor = 0.1
+
+        node = onnx.helper.make_node(
+            "Adagrad",
+            inputs=["R", "T", "X1", "X2", "G1", "G2", "H1", "H2"],
+            outputs=["X1_new", "X2_new", "H1_new", "H2_new"],
+            norm_coefficient=norm_coefficient,
+            epsilon=epsilon,
+            decay_factor=decay_factor,
+            domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,
+        )
+
+        # Define operator inputs.
+        r = np.array(0.1, dtype=np.float32)  # scalar
+        t = np.array(0, dtype=np.int64)  # scalar
+
+        x1 = np.array([1.0], dtype=np.float32)
+        g1 = np.array([-1.0], dtype=np.float32)
+        h1 = np.array([2.0], dtype=np.float32)
+
+        x2 = np.array([1.0, 2.0], dtype=np.float32)
+        g2 = np.array([-1.0, -3.0], dtype=np.float32)
+        h2 = np.array([4.0, 1.0], dtype=np.float32)
+
+        # Compute expected outputs of Adagrad.
+        x1_new, h1_new = apply_adagrad(
+            r, t, x1, g1, h1, norm_coefficient, epsilon, decay_factor
+        )
+        x2_new, h2_new = apply_adagrad(
+            r, t, x2, g2, h2, norm_coefficient, epsilon, decay_factor
+        )
+
+        # Check results.
+        expect(
+            node,
+            inputs=[r, t, x1, x2, g1, g2, h1, h2],
+            outputs=[x1_new, x2_new, h1_new, h2_new],
+            name="test_adagrad_multiple",
+            opset_imports=[
+                onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)
+            ],
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/adam.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/adam.py
new file mode 100644
index 0000000000000000000000000000000000000000..e741839859900c1c0a52883dfba9661149f08091
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/adam.py
@@ -0,0 +1,134 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.defs import AI_ONNX_PREVIEW_TRAINING_DOMAIN
+
+
+def apply_adam(
+    r, t, x, g, v, h, norm_coefficient, norm_coefficient_post, alpha, beta, epsilon
+):
+    # Add gradient of regularization term.
+    g_regularized = norm_coefficient * x + g
+    # Update momentum.
+    v_new = alpha * v + (1 - alpha) * g_regularized
+    # Update second-order momentum.
+    h_new = beta * h + (1 - beta) * (g_regularized * g_regularized)
+    # Compute element-wise square root.
+    h_sqrt = np.sqrt(h_new) + epsilon
+    # Adjust learning rate.
+    r_adjusted = None
+    if t > 0:
+        # Consider bias correction on momentums.
+        r_adjusted = r * np.sqrt(1 - beta**t) / (1 - alpha**t)
+    else:
+        # No bias correction on momentums.
+        r_adjusted = r
+    # Apply Adam update rule.
+    x_new = x - r_adjusted * (v_new / h_sqrt)
+    # It's possible to apply regularization in the end.
+    x_final = (1 - norm_coefficient_post) * x_new
+    return x_final, v_new, h_new
+
+
+class Adam(Base):
+    @staticmethod
+    def export_adam() -> None:
+        # Define operator attributes.
+        norm_coefficient = 0.001
+        alpha = 0.95
+        beta = 0.1
+        epsilon = 1e-7
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "Adam",
+            inputs=["R", "T", "X", "G", "V", "H"],
+            outputs=["X_new", "V_new", "H_new"],
+            norm_coefficient=norm_coefficient,
+            alpha=alpha,
+            beta=beta,
+            epsilon=epsilon,
+            domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,
+        )
+
+        # Define operator inputs.
+        r = np.array(0.1, dtype=np.float32)  # scalar
+        t = np.array(0, dtype=np.int64)  # scalar
+        x = np.array([1.2, 2.8], dtype=np.float32)
+        g = np.array([-0.94, -2.5], dtype=np.float32)
+        v = np.array([1.7, 3.6], dtype=np.float32)
+        h = np.array([0.1, 0.1], dtype=np.float32)
+
+        # Compute expected outputs of Adam.
+        x_new, v_new, h_new = apply_adam(
+            r, t, x, g, v, h, norm_coefficient, 0.0, alpha, beta, epsilon
+        )
+
+        # Check results.
+        expect(
+            node,
+            inputs=[r, t, x, g, v, h],
+            outputs=[x_new, v_new, h_new],
+            name="test_adam",
+            opset_imports=[
+                onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)
+            ],
+        )
+
+    @staticmethod
+    def export_adam_multiple() -> None:
+        # Define operator attributes.
+        norm_coefficient = 0.001
+        alpha = 0.95
+        beta = 0.85
+        epsilon = 1e-2
+
+        node = onnx.helper.make_node(
+            "Adam",
+            inputs=["R", "T", "X1", "X2", "G1", "G2", "V1", "V2", "H1", "H2"],
+            outputs=["X1_new", "X2_new", "V1_new", "V2_new", "H1_new", "H2_new"],
+            norm_coefficient=norm_coefficient,
+            alpha=alpha,
+            beta=beta,
+            domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,
+        )
+
+        # Define operator inputs.
+        r = np.array(0.1, dtype=np.float32)  # scalar
+        t = np.array(0, dtype=np.int64)  # scalar
+
+        x1 = np.array([1.0], dtype=np.float32)
+        g1 = np.array([-1.0], dtype=np.float32)
+        v1 = np.array([2.0], dtype=np.float32)
+        h1 = np.array([0.5], dtype=np.float32)
+
+        x2 = np.array([1.0, 2.0], dtype=np.float32)
+        g2 = np.array([-1.0, -3.0], dtype=np.float32)
+        v2 = np.array([4.0, 1.0], dtype=np.float32)
+        h2 = np.array([1.0, 10.0], dtype=np.float32)
+
+        # Compute expected outputs of Adam.
+        x1_new, v1_new, h1_new = apply_adam(
+            r, t, x1, g1, v1, h1, norm_coefficient, 0.0, alpha, beta, epsilon
+        )
+        x2_new, v2_new, h2_new = apply_adam(
+            r, t, x2, g2, v2, h2, norm_coefficient, 0.0, alpha, beta, epsilon
+        )
+
+        # Check results.
+        expect(
+            node,
+            inputs=[r, t, x1, x2, g1, g2, v1, v2, h1, h2],
+            outputs=[x1_new, x2_new, v1_new, v2_new, h1_new, h2_new],
+            name="test_adam_multiple",
+            opset_imports=[
+                onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)
+            ],
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/add.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/add.py
new file mode 100644
index 0000000000000000000000000000000000000000..36d1e5d015d38e92d7f6981348dc35318910b001
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/add.py
@@ -0,0 +1,60 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Add(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Add",
+            inputs=["x", "y"],
+            outputs=["sum"],
+        )
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = np.random.randn(3, 4, 5).astype(np.float32)
+        expect(node, inputs=[x, y], outputs=[x + y], name="test_add")
+
+        x = np.random.randint(24, size=(3, 4, 5), dtype=np.int8)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.int8)
+        expect(node, inputs=[x, y], outputs=[x + y], name="test_add_int8")
+
+        x = np.random.randint(24, size=(3, 4, 5), dtype=np.int16)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.int16)
+        expect(node, inputs=[x, y], outputs=[x + y], name="test_add_int16")
+
+        x = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)
+        expect(node, inputs=[x, y], outputs=[x + y], name="test_add_uint8")
+
+        x = np.random.randint(24, size=(3, 4, 5), dtype=np.uint16)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint16)
+        expect(node, inputs=[x, y], outputs=[x + y], name="test_add_uint16")
+
+        x = np.random.randint(24, size=(3, 4, 5), dtype=np.uint32)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint32)
+        expect(node, inputs=[x, y], outputs=[x + y], name="test_add_uint32")
+
+        x = np.random.randint(24, size=(3, 4, 5), dtype=np.uint64)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint64)
+        expect(node, inputs=[x, y], outputs=[x + y], name="test_add_uint64")
+
+    @staticmethod
+    def export_add_broadcast() -> None:
+        node = onnx.helper.make_node(
+            "Add",
+            inputs=["x", "y"],
+            outputs=["sum"],
+        )
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = np.random.randn(5).astype(np.float32)
+        expect(node, inputs=[x, y], outputs=[x + y], name="test_add_bcast")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/affinegrid.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/affinegrid.py
new file mode 100644
index 0000000000000000000000000000000000000000..71581fa24fdfbe3fd2a915dbfebab5e86eb43204
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/affinegrid.py
@@ -0,0 +1,210 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.reference.ops.op_affine_grid import (
+    apply_affine_transform,
+    construct_original_grid,
+)
+
+
+def create_affine_matrix_3d(
+    angle1,
+    angle2,
+    offset_x,
+    offset_y,
+    offset_z,
+    shear_x,
+    shear_y,
+    shear_z,
+    scale_x,
+    scale_y,
+    scale_z,
+):
+    rot_x = np.stack(
+        [
+            np.ones_like(angle1),
+            np.zeros_like(angle1),
+            np.zeros_like(angle1),
+            np.zeros_like(angle1),
+            np.cos(angle1),
+            -np.sin(angle1),
+            np.zeros_like(angle1),
+            np.sin(angle1),
+            np.cos(angle1),
+        ],
+        axis=-1,
+    ).reshape(-1, 3, 3)
+    rot_y = np.stack(
+        [
+            np.cos(angle2),
+            np.zeros_like(angle2),
+            np.sin(angle2),
+            np.zeros_like(angle2),
+            np.ones_like(angle2),
+            np.zeros_like(angle2),
+            -np.sin(angle2),
+            np.zeros_like(angle2),
+            np.cos(angle2),
+        ],
+        axis=-1,
+    ).reshape(-1, 3, 3)
+    shear = np.stack(
+        [
+            np.ones_like(shear_x),
+            shear_x,
+            shear_y,
+            shear_z,
+            np.ones_like(shear_x),
+            shear_x,
+            shear_y,
+            shear_x,
+            np.ones_like(shear_x),
+        ],
+        axis=-1,
+    ).reshape(-1, 3, 3)
+    scale = np.stack(
+        [
+            scale_x,
+            np.zeros_like(scale_x),
+            np.zeros_like(scale_x),
+            np.zeros_like(scale_x),
+            scale_y,
+            np.zeros_like(scale_x),
+            np.zeros_like(scale_x),
+            np.zeros_like(scale_x),
+            scale_z,
+        ],
+        axis=-1,
+    ).reshape(-1, 3, 3)
+    translation = np.transpose(np.array([offset_x, offset_y, offset_z])).reshape(
+        -1, 1, 3
+    )
+    rotation_matrix = rot_y @ rot_x @ shear @ scale  # (N, 3, 3)
+    rotation_matrix = np.transpose(rotation_matrix, (0, 2, 1))
+    affine_matrix = np.hstack((rotation_matrix, translation))
+    affine_matrix = np.transpose(affine_matrix, (0, 2, 1))
+    return affine_matrix.astype(np.float32)
+
+
+def create_affine_matrix_2d(
+    angle1, offset_x, offset_y, shear_x, shear_y, scale_x, scale_y
+):
+    rot = np.stack(
+        [np.cos(angle1), -np.sin(angle1), np.sin(angle1), np.cos(angle1)], axis=-1
+    ).reshape(-1, 2, 2)
+    shear = np.stack(
+        [np.ones_like(shear_x), shear_x, shear_y, np.ones_like(shear_x)], axis=-1
+    ).reshape(-1, 2, 2)
+    scale = np.stack(
+        [scale_x, np.zeros_like(scale_x), np.zeros_like(scale_x), scale_y], axis=-1
+    ).reshape(-1, 2, 2)
+    translation = np.transpose(np.array([offset_x, offset_y])).reshape(-1, 1, 2)
+    rotation_matrix = rot @ shear @ scale  # (N, 3, 3)
+    rotation_matrix = np.transpose(rotation_matrix, (0, 2, 1))
+    affine_matrix = np.hstack((rotation_matrix, translation))
+    affine_matrix = np.transpose(affine_matrix, (0, 2, 1))
+    return affine_matrix.astype(np.float32)
+
+
+def create_theta_2d():
+    angle = np.array([np.pi / 4, np.pi / 3])
+    offset_x = np.array([5.0, 2.5])
+    offset_y = np.array([-3.3, 1.1])
+    shear_x = np.array([-0.5, 0.5])
+    shear_y = np.array([0.3, -0.3])
+    scale_x = np.array([2.2, 1.1])
+    scale_y = np.array([3.1, 0.9])
+    theta_2d = create_affine_matrix_2d(
+        angle, offset_x, offset_y, shear_x, shear_y, scale_x, scale_y
+    )
+    return theta_2d
+
+
+def create_theta_3d():
+    angle1 = np.array([np.pi / 4, np.pi / 3])
+    angle2 = np.array([np.pi / 6, np.pi / 2])
+    offset_x = np.array([5.0, 2.5])
+    offset_y = np.array([-3.3, 1.1])
+    offset_z = np.array([-1.1, 2.2])
+    shear_x = np.array([-0.5, 0.5])
+    shear_y = np.array([0.3, -0.3])
+    shear_z = np.array([0.7, -0.2])
+    scale_x = np.array([2.2, 1.1])
+    scale_y = np.array([3.1, 0.9])
+    scale_z = np.array([0.5, 1.5])
+
+    theta_3d = create_affine_matrix_3d(
+        angle1,
+        angle2,
+        offset_x,
+        offset_y,
+        offset_z,
+        shear_x,
+        shear_y,
+        shear_z,
+        scale_x,
+        scale_y,
+        scale_z,
+    )
+    return theta_3d
+
+
+class AffineGrid(Base):
+    @staticmethod
+    def export_2d_no_reference_evaluator() -> None:
+        theta_2d = create_theta_2d()
+        N, C, H, W = len(theta_2d), 3, 5, 6
+        data_size = (H, W)
+        for align_corners in (0, 1):
+            node = onnx.helper.make_node(
+                "AffineGrid",
+                inputs=["theta", "size"],
+                outputs=["grid"],
+                align_corners=align_corners,
+            )
+
+            original_grid = construct_original_grid(data_size, align_corners)
+            grid = apply_affine_transform(theta_2d, original_grid)
+
+            test_name = "test_affine_grid_2d"
+            if align_corners == 1:
+                test_name += "_align_corners"
+            expect(
+                node,
+                inputs=[theta_2d, np.array([N, C, H, W], dtype=np.int64)],
+                outputs=[grid],
+                name=test_name,
+            )
+
+    @staticmethod
+    def export_3d_no_reference_evaluator() -> None:
+        theta_3d = create_theta_3d()
+        N, C, D, H, W = len(theta_3d), 3, 4, 5, 6
+        data_size = (D, H, W)
+        for align_corners in (0, 1):
+            node = onnx.helper.make_node(
+                "AffineGrid",
+                inputs=["theta", "size"],
+                outputs=["grid"],
+                align_corners=align_corners,
+            )
+
+            original_grid = construct_original_grid(data_size, align_corners)
+            grid = apply_affine_transform(theta_3d, original_grid)
+
+            test_name = "test_affine_grid_3d"
+            if align_corners == 1:
+                test_name += "_align_corners"
+            expect(
+                node,
+                inputs=[theta_3d, np.array([N, C, D, H, W], dtype=np.int64)],
+                outputs=[grid],
+                name=test_name,
+            )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__init__.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d731c65da48ab6a3e2606146b157b4d1d82cfc40
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/__init__.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/array_feature_extractor.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/array_feature_extractor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..50503ae6ae44b521444523b24abd6a1b709a58ad
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/array_feature_extractor.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/binarizer.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/binarizer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a7faac056380fdd9d44f3d3558e48be5a3c4bec0
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/binarizer.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/label_encoder.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/label_encoder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a94b5ac2bedce3393a6a3ab3c17b0757df8613c
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/label_encoder.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/tree_ensemble.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/tree_ensemble.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..960c21b34e565985bcfc087852a68d56cde83825
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/__pycache__/tree_ensemble.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/label_encoder.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/label_encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..41a29e5a9f429565c3fb62fa917b1c321726a566
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/label_encoder.py
@@ -0,0 +1,101 @@
+# Copyright (c) ONNX Project Contributors
+
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.helper import make_tensor
+
+
+class LabelEncoder(Base):
+    @staticmethod
+    def export_string_int_label_encoder() -> None:
+        node = onnx.helper.make_node(
+            "LabelEncoder",
+            inputs=["X"],
+            outputs=["Y"],
+            domain="ai.onnx.ml",
+            keys_strings=["a", "b", "c"],
+            values_int64s=[0, 1, 2],
+            default_int64=42,
+        )
+        x = np.array(["a", "b", "d", "c", "g"]).astype(object)
+        y = np.array([0, 1, 42, 2, 42]).astype(np.int64)
+        expect(
+            node,
+            inputs=[x],
+            outputs=[y],
+            name="test_ai_onnx_ml_label_encoder_string_int",
+        )
+
+        node = onnx.helper.make_node(
+            "LabelEncoder",
+            inputs=["X"],
+            outputs=["Y"],
+            domain="ai.onnx.ml",
+            keys_strings=["a", "b", "c"],
+            values_int64s=[0, 1, 2],
+        )
+        x = np.array(["a", "b", "d", "c", "g"]).astype(object)
+        y = np.array([0, 1, -1, 2, -1]).astype(np.int64)
+        expect(
+            node,
+            inputs=[x],
+            outputs=[y],
+            name="test_ai_onnx_ml_label_encoder_string_int_no_default",
+        )
+
+    @staticmethod
+    def export_tensor_based_label_encoder() -> None:
+        tensor_keys = make_tensor(
+            "keys_tensor", onnx.TensorProto.STRING, (3,), ["a", "b", "c"]
+        )
+        repeated_string_keys = ["a", "b", "c"]
+        x = np.array(["a", "b", "d", "c", "g"]).astype(object)
+        y = np.array([0, 1, 42, 2, 42]).astype(np.int16)
+
+        node = onnx.helper.make_node(
+            "LabelEncoder",
+            inputs=["X"],
+            outputs=["Y"],
+            domain="ai.onnx.ml",
+            keys_tensor=tensor_keys,
+            values_tensor=make_tensor(
+                "values_tensor", onnx.TensorProto.INT16, (3,), [0, 1, 2]
+            ),
+            default_tensor=make_tensor(
+                "default_tensor", onnx.TensorProto.INT16, (1,), [42]
+            ),
+        )
+
+        expect(
+            node,
+            inputs=[x],
+            outputs=[y],
+            name="test_ai_onnx_ml_label_encoder_tensor_mapping",
+        )
+
+        node = onnx.helper.make_node(
+            "LabelEncoder",
+            inputs=["X"],
+            outputs=["Y"],
+            domain="ai.onnx.ml",
+            keys_strings=repeated_string_keys,
+            values_tensor=make_tensor(
+                "values_tensor", onnx.TensorProto.INT16, (3,), [0, 1, 2]
+            ),
+            default_tensor=make_tensor(
+                "default_tensor", onnx.TensorProto.INT16, (1,), [42]
+            ),
+        )
+
+        expect(
+            node,
+            inputs=[x],
+            outputs=[y],
+            name="test_ai_onnx_ml_label_encoder_tensor_value_only_mapping",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/tree_ensemble.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/tree_ensemble.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bd52dfe5b9b18fbda5140b8b41a54762f28cd11
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ai_onnx_ml/tree_ensemble.py
@@ -0,0 +1,123 @@
+# Copyright (c) ONNX Project Contributors
+
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.helper import make_tensor
+
+
+class TreeEnsemble(Base):
+    @staticmethod
+    def export_tree_ensemble_single_tree() -> None:
+        node = onnx.helper.make_node(
+            "TreeEnsemble",
+            ["X"],
+            ["Y"],
+            domain="ai.onnx.ml",
+            n_targets=2,
+            membership_values=None,
+            nodes_missing_value_tracks_true=None,
+            nodes_hitrates=None,
+            aggregate_function=1,
+            post_transform=0,
+            tree_roots=[0],
+            nodes_modes=make_tensor(
+                "nodes_modes",
+                onnx.TensorProto.UINT8,
+                (3,),
+                np.array([0, 0, 0], dtype=np.uint8),
+            ),
+            nodes_featureids=[0, 0, 0],
+            nodes_splits=make_tensor(
+                "nodes_splits",
+                onnx.TensorProto.DOUBLE,
+                (3,),
+                np.array([3.14, 1.2, 4.2], dtype=np.float64),
+            ),
+            nodes_truenodeids=[1, 0, 1],
+            nodes_trueleafs=[0, 1, 1],
+            nodes_falsenodeids=[2, 2, 3],
+            nodes_falseleafs=[0, 1, 1],
+            leaf_targetids=[0, 1, 0, 1],
+            leaf_weights=make_tensor(
+                "leaf_weights",
+                onnx.TensorProto.DOUBLE,
+                (4,),
+                np.array([5.23, 12.12, -12.23, 7.21], dtype=np.float64),
+            ),
+        )
+
+        x = np.array([1.2, 3.4, -0.12, 1.66, 4.14, 1.77], np.float64).reshape(3, 2)
+        y = np.array([[5.23, 0], [5.23, 0], [0, 12.12]], dtype=np.float64)
+        expect(
+            node,
+            inputs=[x],
+            outputs=[y],
+            name="test_ai_onnx_ml_tree_ensemble_single_tree",
+        )
+
+    @staticmethod
+    def export_tree_ensemble_set_membership() -> None:
+        node = onnx.helper.make_node(
+            "TreeEnsemble",
+            ["X"],
+            ["Y"],
+            domain="ai.onnx.ml",
+            n_targets=4,
+            aggregate_function=1,
+            membership_values=make_tensor(
+                "membership_values",
+                onnx.TensorProto.FLOAT,
+                (8,),
+                [1.2, 3.7, 8, 9, np.nan, 12, 7, np.nan],
+            ),
+            nodes_missing_value_tracks_true=None,
+            nodes_hitrates=None,
+            post_transform=0,
+            tree_roots=[0],
+            nodes_modes=make_tensor(
+                "nodes_modes",
+                onnx.TensorProto.UINT8,
+                (3,),
+                np.array([0, 6, 6], dtype=np.uint8),
+            ),
+            nodes_featureids=[0, 0, 0],
+            nodes_splits=make_tensor(
+                "nodes_splits",
+                onnx.TensorProto.FLOAT,
+                (3,),
+                np.array([11, 232344.0, np.nan], dtype=np.float32),
+            ),
+            nodes_trueleafs=[0, 1, 1],
+            nodes_truenodeids=[1, 0, 1],
+            nodes_falseleafs=[1, 0, 1],
+            nodes_falsenodeids=[2, 2, 3],
+            leaf_targetids=[0, 1, 2, 3],
+            leaf_weights=make_tensor(
+                "leaf_weights", onnx.TensorProto.FLOAT, (4,), [1, 10, 1000, 100]
+            ),
+        )
+
+        x = np.array([1.2, 3.4, -0.12, np.nan, 12, 7], np.float32).reshape(-1, 1)
+        expected = np.array(
+            [
+                [1, 0, 0, 0],
+                [0, 0, 0, 100],
+                [0, 0, 0, 100],
+                [0, 0, 1000, 0],
+                [0, 0, 1000, 0],
+                [0, 10, 0, 0],
+            ],
+            dtype=np.float32,
+        )
+        expect(
+            node,
+            inputs=[x],
+            outputs=[expected],
+            name="test_ai_onnx_ml_tree_ensemble_set_membership",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/and.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/and.py
new file mode 100644
index 0000000000000000000000000000000000000000..dda42aff372e4255d1e553a80c8200444411fef0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/and.py
@@ -0,0 +1,76 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class And(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "And",
+            inputs=["x", "y"],
+            outputs=["and"],
+        )
+
+        # 2d
+        x = (np.random.randn(3, 4) > 0).astype(bool)
+        y = (np.random.randn(3, 4) > 0).astype(bool)
+        z = np.logical_and(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_and2d")
+
+        # 3d
+        x = (np.random.randn(3, 4, 5) > 0).astype(bool)
+        y = (np.random.randn(3, 4, 5) > 0).astype(bool)
+        z = np.logical_and(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_and3d")
+
+        # 4d
+        x = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)
+        y = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)
+        z = np.logical_and(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_and4d")
+
+    @staticmethod
+    def export_and_broadcast() -> None:
+        node = onnx.helper.make_node(
+            "And",
+            inputs=["x", "y"],
+            outputs=["and"],
+        )
+
+        # 3d vs 1d
+        x = (np.random.randn(3, 4, 5) > 0).astype(bool)
+        y = (np.random.randn(5) > 0).astype(bool)
+        z = np.logical_and(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_and_bcast3v1d")
+
+        # 3d vs 2d
+        x = (np.random.randn(3, 4, 5) > 0).astype(bool)
+        y = (np.random.randn(4, 5) > 0).astype(bool)
+        z = np.logical_and(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_and_bcast3v2d")
+
+        # 4d vs 2d
+        x = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)
+        y = (np.random.randn(5, 6) > 0).astype(bool)
+        z = np.logical_and(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_and_bcast4v2d")
+
+        # 4d vs 3d
+        x = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)
+        y = (np.random.randn(4, 5, 6) > 0).astype(bool)
+        z = np.logical_and(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_and_bcast4v3d")
+
+        # 4d vs 4d
+        x = (np.random.randn(1, 4, 1, 6) > 0).astype(bool)
+        y = (np.random.randn(3, 1, 5, 6) > 0).astype(bool)
+        z = np.logical_and(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_and_bcast4v4d")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/argmax.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/argmax.py
new file mode 100644
index 0000000000000000000000000000000000000000..171a6cc8d776e4c399822b1808765da771031036
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/argmax.py
@@ -0,0 +1,256 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def argmax_use_numpy(data: np.ndarray, axis: int = 0, keepdims: int = 1) -> np.ndarray:
+    result = np.argmax(data, axis=axis)
+    if keepdims == 1:
+        result = np.expand_dims(result, axis)
+    return result.astype(np.int64)
+
+
+def argmax_use_numpy_select_last_index(
+    data: np.ndarray, axis: int = 0, keepdims: int = True
+) -> np.ndarray:
+    data = np.flip(data, axis)
+    result = np.argmax(data, axis=axis)
+    result = data.shape[axis] - result - 1
+    if keepdims:
+        result = np.expand_dims(result, axis)
+    return result.astype(np.int64)
+
+
+class ArgMax(Base):
+    @staticmethod
+    def export_no_keepdims() -> None:
+        data = np.array([[2, 2], [3, 10]], dtype=np.float32)
+        axis = 1
+        keepdims = 0
+        node = onnx.helper.make_node(
+            "ArgMax", inputs=["data"], outputs=["result"], axis=axis, keepdims=keepdims
+        )
+        # result: [0, 1]
+        result = argmax_use_numpy(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmax_no_keepdims_example",
+        )
+
+        data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
+        # result's shape: [2, 4]
+        result = argmax_use_numpy(data, axis=axis, keepdims=keepdims)
+        expect(
+            node, inputs=[data], outputs=[result], name="test_argmax_no_keepdims_random"
+        )
+
+    @staticmethod
+    def export_keepdims() -> None:
+        data = np.array([[2, 2], [3, 10]], dtype=np.float32)
+        axis = 1
+        keepdims = 1
+        node = onnx.helper.make_node(
+            "ArgMax", inputs=["data"], outputs=["result"], axis=axis, keepdims=keepdims
+        )
+        # result: [[0], [1]]
+        result = argmax_use_numpy(data, axis=axis, keepdims=keepdims)
+        expect(
+            node, inputs=[data], outputs=[result], name="test_argmax_keepdims_example"
+        )
+
+        data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
+        # result's shape: [2, 1, 4]
+        result = argmax_use_numpy(data, axis=axis, keepdims=keepdims)
+        expect(
+            node, inputs=[data], outputs=[result], name="test_argmax_keepdims_random"
+        )
+
+    @staticmethod
+    def export_default_axes_keepdims() -> None:
+        data = np.array([[2, 2], [3, 10]], dtype=np.float32)
+        keepdims = 1
+        node = onnx.helper.make_node(
+            "ArgMax", inputs=["data"], outputs=["result"], keepdims=keepdims
+        )
+
+        # result: [[1, 1]]
+        result = argmax_use_numpy(data, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmax_default_axis_example",
+        )
+
+        data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
+        # result's shape: [1, 3, 4]
+        result = argmax_use_numpy(data, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmax_default_axis_random",
+        )
+
+    @staticmethod
+    def export_negative_axis_keepdims() -> None:
+        data = np.array([[2, 2], [3, 10]], dtype=np.float32)
+        axis = -1
+        keepdims = 1
+        node = onnx.helper.make_node(
+            "ArgMax", inputs=["data"], outputs=["result"], axis=axis, keepdims=keepdims
+        )
+        # result: [[0], [1]]
+        result = argmax_use_numpy(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmax_negative_axis_keepdims_example",
+        )
+
+        data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
+        # result's shape: [2, 3, 1]
+        result = argmax_use_numpy(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmax_negative_axis_keepdims_random",
+        )
+
+    @staticmethod
+    def export_no_keepdims_select_last_index() -> None:
+        data = np.array([[2, 2], [3, 10]], dtype=np.float32)
+        axis = 1
+        keepdims = 0
+        node = onnx.helper.make_node(
+            "ArgMax",
+            inputs=["data"],
+            outputs=["result"],
+            axis=axis,
+            keepdims=keepdims,
+            select_last_index=True,
+        )
+        # result: [1, 1]
+        result = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmax_no_keepdims_example_select_last_index",
+        )
+
+        data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
+        # result's shape: [2, 4]
+        result = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmax_no_keepdims_random_select_last_index",
+        )
+
+    @staticmethod
+    def export_keepdims_select_last_index() -> None:
+        data = np.array([[2, 2], [3, 10]], dtype=np.float32)
+        axis = 1
+        keepdims = 1
+        node = onnx.helper.make_node(
+            "ArgMax",
+            inputs=["data"],
+            outputs=["result"],
+            axis=axis,
+            keepdims=keepdims,
+            select_last_index=True,
+        )
+        # result: [[1], [1]]
+        result = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmax_keepdims_example_select_last_index",
+        )
+
+        data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
+        # result's shape: [2, 1, 4]
+        result = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmax_keepdims_random_select_last_index",
+        )
+
+    @staticmethod
+    def export_default_axes_keepdims_select_last_index() -> None:
+        data = np.array([[2, 2], [3, 10]], dtype=np.float32)
+        keepdims = 1
+        node = onnx.helper.make_node(
+            "ArgMax",
+            inputs=["data"],
+            outputs=["result"],
+            keepdims=keepdims,
+            select_last_index=True,
+        )
+
+        # result: [[1, 1]]
+        result = argmax_use_numpy_select_last_index(data, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmax_default_axis_example_select_last_index",
+        )
+
+        data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
+        # result's shape: [1, 3, 4]
+        result = argmax_use_numpy_select_last_index(data, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmax_default_axis_random_select_last_index",
+        )
+
+    @staticmethod
+    def export_negative_axis_keepdims_select_last_index() -> None:
+        data = np.array([[2, 2], [3, 10]], dtype=np.float32)
+        axis = -1
+        keepdims = 1
+        node = onnx.helper.make_node(
+            "ArgMax",
+            inputs=["data"],
+            outputs=["result"],
+            axis=axis,
+            keepdims=keepdims,
+            select_last_index=True,
+        )
+        # result: [[1], [1]]
+        result = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmax_negative_axis_keepdims_example_select_last_index",
+        )
+
+        data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
+        # result's shape: [2, 3, 1]
+        result = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmax_negative_axis_keepdims_random_select_last_index",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/argmin.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/argmin.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b1bf2e959a999ecc5edd8ab124e94b988b076a0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/argmin.py
@@ -0,0 +1,256 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def argmin_use_numpy(data: np.ndarray, axis: int = 0, keepdims: int = 1) -> np.ndarray:
+    result = np.argmin(data, axis=axis)
+    if keepdims == 1:
+        result = np.expand_dims(result, axis)
+    return result.astype(np.int64)
+
+
+def argmin_use_numpy_select_last_index(
+    data: np.ndarray, axis: int = 0, keepdims: int = True
+) -> np.ndarray:
+    data = np.flip(data, axis)
+    result = np.argmin(data, axis=axis)
+    result = data.shape[axis] - result - 1
+    if keepdims:
+        result = np.expand_dims(result, axis)
+    return result.astype(np.int64)
+
+
+class ArgMin(Base):
+    @staticmethod
+    def export_no_keepdims() -> None:
+        data = np.array([[2, 1], [3, 10]], dtype=np.float32)
+        axis = 1
+        keepdims = 0
+        node = onnx.helper.make_node(
+            "ArgMin", inputs=["data"], outputs=["result"], axis=axis, keepdims=keepdims
+        )
+        # The content of result is : [[1, 0]]
+        result = argmin_use_numpy(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmin_no_keepdims_example",
+        )
+
+        data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
+        # result's shape: [2, 4]
+        result = argmin_use_numpy(data, axis=axis, keepdims=keepdims)
+        expect(
+            node, inputs=[data], outputs=[result], name="test_argmin_no_keepdims_random"
+        )
+
+    @staticmethod
+    def export_keepdims() -> None:
+        data = np.array([[2, 1], [3, 10]], dtype=np.float32)
+        axis = 1
+        keepdims = 1
+        node = onnx.helper.make_node(
+            "ArgMin", inputs=["data"], outputs=["result"], axis=axis, keepdims=keepdims
+        )
+        # The content of result is : [[1], [0]]
+        result = argmin_use_numpy(data, axis=axis, keepdims=keepdims)
+        expect(
+            node, inputs=[data], outputs=[result], name="test_argmin_keepdims_example"
+        )
+
+        data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
+        # result's shape: [2, 1, 4]
+        result = argmin_use_numpy(data, axis=axis, keepdims=keepdims)
+        expect(
+            node, inputs=[data], outputs=[result], name="test_argmin_keepdims_random"
+        )
+
+    @staticmethod
+    def export_default_axes_keepdims() -> None:
+        data = np.array([[2, 1], [3, 10]], dtype=np.float32)
+        keepdims = 1
+        node = onnx.helper.make_node(
+            "ArgMin", inputs=["data"], outputs=["result"], keepdims=keepdims
+        )
+
+        # The content of result is : [[0], [0]]
+        result = argmin_use_numpy(data, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmin_default_axis_example",
+        )
+
+        data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
+        # result's shape: [1, 3, 4]
+        result = argmin_use_numpy(data, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmin_default_axis_random",
+        )
+
+    @staticmethod
+    def export_negative_axis_keepdims() -> None:
+        data = np.array([[2, 1], [3, 10]], dtype=np.float32)
+        axis = -1
+        keepdims = 1
+        node = onnx.helper.make_node(
+            "ArgMin", inputs=["data"], outputs=["result"], axis=axis, keepdims=keepdims
+        )
+        # The content of result is : [[1], [0]]
+        result = argmin_use_numpy(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmin_negative_axis_keepdims_example",
+        )
+
+        data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
+        # result's shape: [2, 3, 1]
+        result = argmin_use_numpy(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmin_negative_axis_keepdims_random",
+        )
+
+    @staticmethod
+    def export_no_keepdims_select_last_index() -> None:
+        data = np.array([[2, 2], [3, 10]], dtype=np.float32)
+        axis = 1
+        keepdims = 0
+        node = onnx.helper.make_node(
+            "ArgMin",
+            inputs=["data"],
+            outputs=["result"],
+            axis=axis,
+            keepdims=keepdims,
+            select_last_index=True,
+        )
+        # result: [[1, 0]]
+        result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmin_no_keepdims_example_select_last_index",
+        )
+
+        data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
+        # result's shape: [2, 4]
+        result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmin_no_keepdims_random_select_last_index",
+        )
+
+    @staticmethod
+    def export_keepdims_select_last_index() -> None:
+        data = np.array([[2, 2], [3, 10]], dtype=np.float32)
+        axis = 1
+        keepdims = 1
+        node = onnx.helper.make_node(
+            "ArgMin",
+            inputs=["data"],
+            outputs=["result"],
+            axis=axis,
+            keepdims=keepdims,
+            select_last_index=True,
+        )
+        # result: [[1], [0]]
+        result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmin_keepdims_example_select_last_index",
+        )
+
+        data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
+        # result's shape: [2, 1, 4]
+        result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmin_keepdims_random_select_last_index",
+        )
+
+    @staticmethod
+    def export_default_axes_keepdims_select_last_index() -> None:
+        data = np.array([[2, 2], [3, 10]], dtype=np.float32)
+        keepdims = 1
+        node = onnx.helper.make_node(
+            "ArgMin",
+            inputs=["data"],
+            outputs=["result"],
+            keepdims=keepdims,
+            select_last_index=True,
+        )
+
+        # result: [[0, 0]]
+        result = argmin_use_numpy_select_last_index(data, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmin_default_axis_example_select_last_index",
+        )
+
+        data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
+        # result's shape: [1, 3, 4]
+        result = argmin_use_numpy_select_last_index(data, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmin_default_axis_random_select_last_index",
+        )
+
+    @staticmethod
+    def export_negative_axis_keepdims_select_last_index() -> None:
+        data = np.array([[2, 2], [3, 10]], dtype=np.float32)
+        axis = -1
+        keepdims = 1
+        node = onnx.helper.make_node(
+            "ArgMin",
+            inputs=["data"],
+            outputs=["result"],
+            axis=axis,
+            keepdims=keepdims,
+            select_last_index=True,
+        )
+        # result: [[1], [0]]
+        result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmin_negative_axis_keepdims_example_select_last_index",
+        )
+
+        data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
+        # result's shape: [2, 3, 1]
+        result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
+        expect(
+            node,
+            inputs=[data],
+            outputs=[result],
+            name="test_argmin_negative_axis_keepdims_random_select_last_index",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/asin.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/asin.py
new file mode 100644
index 0000000000000000000000000000000000000000..e46cdf3715f4f4fc92f269f40f6b6964570506c6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/asin.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Asin(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Asin",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.array([-0.5, 0, 0.5]).astype(np.float32)
+        y = np.arcsin(x)
+        expect(node, inputs=[x], outputs=[y], name="test_asin_example")
+
+        x = np.random.rand(3, 4, 5).astype(np.float32)
+        y = np.arcsin(x)
+        expect(node, inputs=[x], outputs=[y], name="test_asin")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/asinh.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/asinh.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4674cf3b171c924ffdb6c5d09030b19a6d3c3b3
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/asinh.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Asinh(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Asinh",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.array([-1, 0, 1]).astype(np.float32)
+        y = np.arcsinh(x)  # expected output [-0.88137358,  0.,  0.88137358]
+        expect(node, inputs=[x], outputs=[y], name="test_asinh_example")
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = np.arcsinh(x)
+        expect(node, inputs=[x], outputs=[y], name="test_asinh")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/atan.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/atan.py
new file mode 100644
index 0000000000000000000000000000000000000000..8eaef5a5986ac2bcb4182a1586a3d89095bb3f37
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/atan.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Atan(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Atan",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.array([-1, 0, 1]).astype(np.float32)
+        y = np.arctan(x)
+        expect(node, inputs=[x], outputs=[y], name="test_atan_example")
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = np.arctan(x)
+        expect(node, inputs=[x], outputs=[y], name="test_atan")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/atanh.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/atanh.py
new file mode 100644
index 0000000000000000000000000000000000000000..57be78489921e0474946e85de51d86e1a8a75ce4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/atanh.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Atanh(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Atanh",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.array([-0.5, 0, 0.5]).astype(np.float32)
+        y = np.arctanh(x)  # expected output [-0.54930615,  0.,  0.54930615]
+        expect(node, inputs=[x], outputs=[y], name="test_atanh_example")
+
+        x = np.random.uniform(0.0, 1.0, (3, 4, 5)).astype(np.float32)
+        y = np.arctanh(x)
+        expect(node, inputs=[x], outputs=[y], name="test_atanh")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/attention.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/attention.py
new file mode 100644
index 0000000000000000000000000000000000000000..a26bd2691640006e7abe2fe73f160aece3743f40
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/attention.py
@@ -0,0 +1,1874 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.reference.ops.op_attention import _compute_attention
+
+
+class Attention(Base):
+    @staticmethod
+    def export_attention() -> None:
+        node = onnx.helper.make_node("Attention", inputs=["Q", "K", "V"], outputs=["Y"])
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(Q, K, V)
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_4d",
+        )
+
+    @staticmethod
+    def export_attention_fp16() -> None:
+        node = onnx.helper.make_node("Attention", inputs=["Q", "K", "V"], outputs=["Y"])
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float16)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float16)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float16)
+
+        Y, _, _, _ = _compute_attention(Q, K, V)
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_4d_fp16",
+        )
+
+    @staticmethod
+    def export_attention_gqa() -> None:
+        node = onnx.helper.make_node("Attention", inputs=["Q", "K", "V"], outputs=["Y"])
+
+        Q = np.random.rand(2, 9, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(Q, K, V)
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_4d_gqa",
+        )
+
+    @staticmethod
+    def export_attention_diff_head_sizes() -> None:
+        node = onnx.helper.make_node("Attention", inputs=["Q", "K", "V"], outputs=["Y"])
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 10).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(Q, K, V)
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_4d_diff_heads_sizes",
+        )
+
+    @staticmethod
+    def export_attention_scaled() -> None:
+        scale = 1e-2
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            scale=scale,
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(Q, K, V, scale=scale)
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_4d_scaled",
+        )
+
+    @staticmethod
+    def export_attention_gqa_scaled() -> None:
+        scale = 1e-2
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            scale=scale,
+        )
+
+        Q = np.random.rand(2, 9, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(Q, K, V, scale=scale)
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_4d_gqa_scaled",
+        )
+
+    @staticmethod
+    def export_attention_diff_head_sizes_scaled() -> None:
+        scale = 1e-2
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            scale=scale,
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 10).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(Q, K, V, scale=scale)
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_4d_diff_heads_sizes_scaled",
+        )
+
+    @staticmethod
+    def export_attention_causal() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            is_causal=1,
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(Q, K, V, is_causal=1)
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_4d_causal",
+        )
+
+    @staticmethod
+    def export_attention_gqa_causal() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            is_causal=1,
+        )
+
+        Q = np.random.rand(2, 9, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(Q, K, V, is_causal=1)
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_4d_gqa_causal",
+        )
+
+    @staticmethod
+    def export_attention_diff_head_sizes_causal() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            is_causal=1,
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 10).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            is_causal=1,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_4d_diff_heads_sizes_causal",
+        )
+
+    @staticmethod
+    def export_attention_attn_mask() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask"],
+            outputs=["Y"],
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(4, 6).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask],
+            outputs=[Y],
+            name="test_attention_4d_attn_mask",
+        )
+
+    @staticmethod
+    def export_attention_attn_3d_mask() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask"],
+            outputs=["Y"],
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(2, 1, 4, 6).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask],
+            outputs=[Y],
+            name="test_attention_4d_attn_mask_3d",
+        )
+
+    @staticmethod
+    def export_attention_attn_3d_mask_causal() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask"],
+            outputs=["Y"],
+            is_causal=1,
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(2, 1, 4, 6).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            is_causal=1,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask],
+            outputs=[Y],
+            name="test_attention_4d_attn_mask_3d_causal",
+        )
+
+    @staticmethod
+    def export_attention_attn_4d_mask() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask"],
+            outputs=["Y"],
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(2, 3, 4, 6).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask],
+            outputs=[Y],
+            name="test_attention_4d_attn_mask_4d",
+        )
+
+    @staticmethod
+    def export_attention_attn_4d_mask_causal() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask"],
+            outputs=["Y"],
+            is_causal=1,
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(2, 3, 4, 6).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            is_causal=1,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask],
+            outputs=[Y],
+            name="test_attention_4d_attn_mask_4d_causal",
+        )
+
+    @staticmethod
+    def export_attention_attn_mask_bool() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask"],
+            outputs=["Y"],
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(4, 6).astype(bool)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask],
+            outputs=[Y],
+            name="test_attention_4d_attn_mask_bool",
+        )
+
+    @staticmethod
+    def export_attention_attn_mask_bool_4d() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask"],
+            outputs=["Y"],
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(2, 3, 4, 6).astype(bool)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask],
+            outputs=[Y],
+            name="test_attention_4d_attn_mask_bool_4d",
+        )
+
+    @staticmethod
+    def export_attention_gqa_attn_mask() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask"],
+            outputs=["Y"],
+        )
+
+        Q = np.random.rand(2, 9, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(4, 6).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask],
+            outputs=[Y],
+            name="test_attention_4d_gqa_attn_mask",
+        )
+
+    @staticmethod
+    def export_attention_diff_head_sizes_attn_mask() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask"],
+            outputs=["Y"],
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 10).astype(np.float32)
+        attn_mask = np.random.rand(4, 6).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask],
+            outputs=[Y],
+            name="test_attention_4d_diff_heads_sizes_attn_mask",
+        )
+
+    @staticmethod
+    def export_attention_with_past_and_present() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value"],
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+
+        Y, present_key, present_value, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value],
+            name="test_attention_4d_with_past_and_present",
+        )
+
+    @staticmethod
+    def export_attention_gqa_with_past_and_present() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value"],
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 9, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+
+        Y, present_key, present_value, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value],
+            name="test_attention_4d_gqa_with_past_and_present",
+        )
+
+    @staticmethod
+    def export_attention_gqa_with_past_and_present_fp16() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value"],
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 9, 4, 8).astype(np.float16)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float16)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float16)
+        attn_mask = np.random.rand(4, 6 + past_sequence_length).astype(np.float16)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float16)
+        past_value = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float16)
+
+        Y, present_key, present_value, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value],
+            name="test_attention_4d_gqa_with_past_and_present_fp16",
+        )
+
+    @staticmethod
+    def export_attention_diff_head_sizes_with_past_and_present() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value"],
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 10).astype(np.float32)
+        attn_mask = np.random.rand(4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 10).astype(np.float32)
+
+        Y, present_key, present_value, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value],
+            name="test_attention_4d_diff_heads_with_past_and_present",
+        )
+
+    @staticmethod
+    def export_attention_diff_head_sizes_with_past_and_present_mask3D() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value"],
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 10).astype(np.float32)
+        attn_mask = np.random.rand(2, 1, 4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 10).astype(np.float32)
+
+        Y, present_key, present_value, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value],
+            name="test_attention_4d_diff_heads_with_past_and_present_mask3d",
+        )
+
+    @staticmethod
+    def export_attention_diff_head_sizes_with_past_and_present_mask4D() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value"],
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 10).astype(np.float32)
+        attn_mask = np.random.rand(2, 3, 4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 10).astype(np.float32)
+
+        Y, present_key, present_value, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value],
+            name="test_attention_4d_diff_heads_with_past_and_present_mask4d",
+        )
+
+    @staticmethod
+    def export_attention_softcap() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            softcap=2.0,
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(Q, K, V, softcap=2.0)
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_4d_softcap",
+        )
+
+    @staticmethod
+    def export_attention_gqa_softcap() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            softcap=2.0,
+        )
+
+        Q = np.random.rand(2, 9, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(Q, K, V, softcap=2.0)
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_4d_gqa_softcap",
+        )
+
+    @staticmethod
+    def export_attention_diff_head_sizes_softcap() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            softcap=2.0,
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 10).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            softcap=2.0,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_4d_diff_heads_sizes_softcap",
+        )
+
+    @staticmethod
+    def export_attention_with_qk_matmul() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y", "", "", "qk_matmul_output"],
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+
+        Y, _, _, qk_matmul_output = _compute_attention(Q, K, V)
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y, qk_matmul_output],
+            name="test_attention_4d_with_qk_matmul",
+        )
+
+    @staticmethod
+    def export_attention_with_qk_matmul_bias() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask"],
+            outputs=["Y", "", "", "qk_matmul_output"],
+            qk_matmul_output_mode=1,
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(4, 6).astype(np.float32)
+
+        Y, _, _, qk_matmul_output = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            qk_matmul_output_mode=1,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask],
+            outputs=[Y, qk_matmul_output],
+            name="test_attention_4d_with_qk_matmul_bias",
+        )
+
+    @staticmethod
+    def export_attention_with_qk_matmul_softcap() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask"],
+            outputs=["Y", "", "", "qk_matmul_output"],
+            softcap=2.0,
+            qk_matmul_output_mode=2,
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(4, 6).astype(np.float32)
+
+        Y, _, _, qk_matmul_output = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            softcap=2.0,
+            qk_matmul_output_mode=2,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask],
+            outputs=[Y, qk_matmul_output],
+            name="test_attention_4d_with_qk_matmul_softcap",
+        )
+
+    @staticmethod
+    def export_attention_with_qk_matmul_softmax() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask"],
+            outputs=["Y", "", "", "qk_matmul_output"],
+            qk_matmul_output_mode=3,
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(4, 6).astype(np.float32)
+
+        Y, _, _, qk_matmul_output = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            qk_matmul_output_mode=3,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask],
+            outputs=[Y, qk_matmul_output],
+            name="test_attention_4d_with_qk_matmul_softmax",
+        )
+
+    @staticmethod
+    def export_attention_with_past_and_present_qk_matmul_bias() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value", "qk_matmul_output"],
+            qk_matmul_output_mode=1,
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+
+        Y, present_key, present_value, qk_matmul_output = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+            qk_matmul_output_mode=1,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value, qk_matmul_output],
+            name="test_attention_4d_with_past_and_present_qk_matmul_bias",
+        )
+
+    @staticmethod
+    def export_attention_with_past_and_present_qk_matmul_bias_3d_mask() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value", "qk_matmul_output"],
+            qk_matmul_output_mode=1,
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(2, 1, 4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+
+        Y, present_key, present_value, qk_matmul_output = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+            qk_matmul_output_mode=1,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value, qk_matmul_output],
+            name="test_attention_4d_with_past_and_present_qk_matmul_bias_3d_mask",
+        )
+
+    @staticmethod
+    def export_attention_with_past_and_present_qk_matmul_bias_4d_mask() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value", "qk_matmul_output"],
+            qk_matmul_output_mode=1,
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(2, 3, 4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+
+        Y, present_key, present_value, qk_matmul_output = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+            qk_matmul_output_mode=1,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value, qk_matmul_output],
+            name="test_attention_4d_with_past_and_present_qk_matmul_bias_4d_mask",
+        )
+
+    @staticmethod
+    def export_attention_with_past_and_present_qk_matmul_bias_3d_mask_causal() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value", "qk_matmul_output"],
+            qk_matmul_output_mode=1,
+            is_causal=1,
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(2, 1, 4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+
+        Y, present_key, present_value, qk_matmul_output = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+            qk_matmul_output_mode=1,
+            is_causal=1,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value, qk_matmul_output],
+            name="test_attention_4d_with_past_and_present_qk_matmul_bias_3d_mask_causal",
+        )
+
+    @staticmethod
+    def export_attention_with_past_and_present_qk_matmul_bias_4d_mask_causal() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value", "qk_matmul_output"],
+            qk_matmul_output_mode=1,
+            is_causal=1,
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(2, 3, 4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+
+        Y, present_key, present_value, qk_matmul_output = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+            qk_matmul_output_mode=1,
+            is_causal=1,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value, qk_matmul_output],
+            name="test_attention_4d_with_past_and_present_qk_matmul_bias_4d_mask_causal",
+        )
+
+    @staticmethod
+    def export_attention_with_past_and_present_qk_matmul() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value", "qk_matmul_output"],
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        attn_mask = np.random.rand(4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+
+        Y, present_key, present_value, qk_matmul_output = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value, qk_matmul_output],
+            name="test_attention_4d_with_past_and_present_qk_matmul",
+        )
+
+    @staticmethod
+    def export_attention_3d() -> None:
+        q_num_heads, kv_num_heads = 3, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        Q = np.random.rand(2, 4, 24).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 24).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_3d",
+        )
+
+    @staticmethod
+    def export_attention_3d_gqa() -> None:
+        q_num_heads, kv_num_heads = 9, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        Q = np.random.rand(2, 4, 72).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 24).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_3d_gqa",
+        )
+
+    @staticmethod
+    def export_attention_3d_diff_head_sizes() -> None:
+        q_num_heads, kv_num_heads = 3, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        Q = np.random.rand(2, 4, 24).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 30).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_3d_diff_heads_sizes",
+        )
+
+    @staticmethod
+    def export_attention_3d_scaled() -> None:
+        scale = 1e-2
+        q_num_heads, kv_num_heads = 3, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            scale=scale,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        Q = np.random.rand(2, 4, 24).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 24).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            scale=scale,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_3d_scaled",
+        )
+
+    @staticmethod
+    def export_attention_3d_gqa_scaled() -> None:
+        scale = 1e-2
+        q_num_heads, kv_num_heads = 9, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            scale=scale,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        Q = np.random.rand(2, 4, 72).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 24).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            scale=scale,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_3d_gqa_scaled",
+        )
+
+    @staticmethod
+    def export_attention_3d_diff_head_sizes_scaled() -> None:
+        scale = 1e-2
+        q_num_heads, kv_num_heads = 3, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            scale=scale,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        Q = np.random.rand(2, 4, 24).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 30).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            scale=scale,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_3d_diff_heads_sizes_scaled",
+        )
+
+    @staticmethod
+    def export_attention_3d_causal() -> None:
+        q_num_heads, kv_num_heads = 3, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            is_causal=1,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        Q = np.random.rand(2, 4, 24).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 24).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            is_causal=1,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_3d_causal",
+        )
+
+    @staticmethod
+    def export_attention_3d_gqa_causal() -> None:
+        q_num_heads, kv_num_heads = 9, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            is_causal=1,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        Q = np.random.rand(2, 4, 72).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 24).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            is_causal=1,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_3d_gqa_causal",
+        )
+
+    @staticmethod
+    def export_attention_3d_diff_head_sizes_causal() -> None:
+        q_num_heads, kv_num_heads = 3, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            is_causal=1,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        Q = np.random.rand(2, 4, 24).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 30).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            is_causal=1,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_3d_diff_heads_sizes_causal",
+        )
+
+    @staticmethod
+    def export_attention_3d_attn_mask() -> None:
+        q_num_heads, kv_num_heads = 3, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask"],
+            outputs=["Y"],
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        Q = np.random.rand(2, 4, 24).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 24).astype(np.float32)
+        attn_mask = np.random.rand(4, 6).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask],
+            outputs=[Y],
+            name="test_attention_3d_attn_mask",
+        )
+
+    @staticmethod
+    def export_attention_3d_gqa_attn_mask() -> None:
+        q_num_heads, kv_num_heads = 9, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask"],
+            outputs=["Y"],
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        Q = np.random.rand(2, 4, 72).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 24).astype(np.float32)
+        attn_mask = np.random.rand(4, 6).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask],
+            outputs=[Y],
+            name="test_attention_3d_gqa_attn_mask",
+        )
+
+    @staticmethod
+    def export_attention_3d_diff_head_sizes_attn_mask() -> None:
+        q_num_heads, kv_num_heads = 3, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask"],
+            outputs=["Y"],
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        Q = np.random.rand(2, 4, 24).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 30).astype(np.float32)
+        attn_mask = np.random.rand(4, 6).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask],
+            outputs=[Y],
+            name="test_attention_3d_diff_heads_sizes_attn_mask",
+        )
+
+    @staticmethod
+    def export_attention_3d_softcap() -> None:
+        q_num_heads, kv_num_heads = 3, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            softcap=3.0,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        Q = np.random.rand(2, 4, 24).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 24).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            softcap=3.0,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_3d_softcap",
+        )
+
+    @staticmethod
+    def export_attention_3d_gqa_softcap() -> None:
+        q_num_heads, kv_num_heads = 9, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            softcap=3.0,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        Q = np.random.rand(2, 4, 72).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 24).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            softcap=3.0,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_3d_gqa_softcap",
+        )
+
+    @staticmethod
+    def export_attention_3d_diff_head_sizes_softcap() -> None:
+        q_num_heads, kv_num_heads = 3, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            softcap=3.0,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        Q = np.random.rand(2, 4, 24).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 30).astype(np.float32)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            softcap=3.0,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_3d_diff_heads_sizes_softcap",
+        )
+
+    @staticmethod
+    def export_attention_3d_with_past_and_present() -> None:
+        q_num_heads, kv_num_heads = 3, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value"],
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 4, 24).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 24).astype(np.float32)
+        attn_mask = np.random.rand(4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+
+        Y, present_key, present_value, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value],
+            name="test_attention_3d_with_past_and_present",
+        )
+
+    @staticmethod
+    def export_attention_3d_gqa_with_past_and_present() -> None:
+        q_num_heads, kv_num_heads = 9, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value"],
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 4, 72).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 24).astype(np.float32)
+        attn_mask = np.random.rand(4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+
+        Y, present_key, present_value, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value],
+            name="test_attention_3d_gqa_with_past_and_present",
+        )
+
+    @staticmethod
+    def export_attention_3d_diff_head_sizes_with_past_and_present() -> None:
+        q_num_heads, kv_num_heads = 3, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value"],
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 4, 24).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 30).astype(np.float32)
+        attn_mask = np.random.rand(4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 10).astype(np.float32)
+
+        Y, present_key, present_value, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value],
+            name="test_attention_3d_diff_heads_with_past_and_present",
+        )
+
+    @staticmethod
+    def export_attention_3d_with_past_and_present_qk_matmul() -> None:
+        q_num_heads, kv_num_heads = 3, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value", "qk_matmul_output"],
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 4, 24).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 24).astype(np.float32)
+        attn_mask = np.random.rand(4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+
+        Y, present_key, present_value, qk_matmul_output = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value, qk_matmul_output],
+            name="test_attention_3d_with_past_and_present_qk_matmul",
+        )
+
+    @staticmethod
+    def export_attention_3d_with_past_and_present_qk_matmul_bias() -> None:
+        q_num_heads, kv_num_heads = 3, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value", "qk_matmul_output"],
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+            qk_matmul_output_mode=1,
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 4, 24).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 24).astype(np.float32)
+        attn_mask = np.random.rand(4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+
+        Y, present_key, present_value, qk_matmul_output = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+            qk_matmul_output_mode=1,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value, qk_matmul_output],
+            name="test_attention_3d_with_past_and_present_qk_matmul_bias",
+        )
+
+    @staticmethod
+    def export_attention_3d_with_past_and_present_qk_matmul_softcap() -> None:
+        q_num_heads, kv_num_heads = 3, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value", "qk_matmul_output"],
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+            softcap=2.0,
+            qk_matmul_output_mode=2,
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 4, 24).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 24).astype(np.float32)
+        attn_mask = np.random.rand(4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+
+        Y, present_key, present_value, qk_matmul_output = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+            softcap=2.0,
+            qk_matmul_output_mode=2,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value, qk_matmul_output],
+            name="test_attention_3d_with_past_and_present_qk_matmul_softcap",
+        )
+
+    @staticmethod
+    def export_attention_3d_with_past_and_present_qk_matmul_softmax() -> None:
+        q_num_heads, kv_num_heads = 3, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "past_key", "past_value"],
+            outputs=["Y", "present_key", "present_value", "qk_matmul_output"],
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+            qk_matmul_output_mode=3,
+        )
+
+        past_sequence_length = 12
+        Q = np.random.rand(2, 4, 24).astype(np.float32)
+        K = np.random.rand(2, 6, 24).astype(np.float32)
+        V = np.random.rand(2, 6, 24).astype(np.float32)
+        attn_mask = np.random.rand(4, 6 + past_sequence_length).astype(np.float32)
+        past_key = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+        past_value = np.random.rand(2, 3, past_sequence_length, 8).astype(np.float32)
+
+        Y, present_key, present_value, qk_matmul_output = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            past_key=past_key,
+            past_value=past_value,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+            qk_matmul_output_mode=3,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, past_key, past_value],
+            outputs=[Y, present_key, present_value, qk_matmul_output],
+            name="test_attention_3d_with_past_and_present_qk_matmul_softmax",
+        )
+
+    @staticmethod
+    def export_attention_3d_transpose_verification() -> None:
+        """Test case to verify correct 3D to 4D transpose behavior.
+
+        This test verifies that 3D inputs are correctly reshaped and transposed
+        according to the ONNX specification:
+        [batch_size, seq_length, hidden_size] ->
+        [batch_size, seq_length, num_heads, head_size] ->
+        [batch_size, num_heads, seq_length, head_size]
+        """
+        q_num_heads, kv_num_heads = 3, 3
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V"],
+            outputs=["Y"],
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        # Test inputs that will clearly demonstrate the transpose behavior
+        batch_size = 1
+        q_seq_length = 2
+        kv_seq_length = 2
+        head_size = 4
+        q_hidden_size = q_num_heads * head_size  # 3 * 4 = 12
+        kv_hidden_size = kv_num_heads * head_size  # 3 * 4 = 12
+
+        # Create structured inputs to verify correct transpose behavior
+        # Q has a pattern where each position in hidden dimension has a specific value
+        Q = np.zeros((batch_size, q_seq_length, q_hidden_size), dtype=np.float32)
+        # Fill Q with pattern: head0=[1,1,1,1], head1=[2,2,2,2], head2=[3,3,3,3]
+        for head in range(q_num_heads):
+            start_idx = head * head_size
+            end_idx = start_idx + head_size
+            Q[0, :, start_idx:end_idx] = float(head + 1)
+
+        K = np.ones((batch_size, kv_seq_length, kv_hidden_size), dtype=np.float32) * 0.1
+        V = np.ones((batch_size, kv_seq_length, kv_hidden_size), dtype=np.float32) * 0.1
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            q_num_heads=q_num_heads,
+            kv_num_heads=kv_num_heads,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V],
+            outputs=[Y],
+            name="test_attention_3d_transpose_verification",
+        )
+
+    @staticmethod
+    def export_attention_4d_diff_heads_mask4d_padded_kv() -> None:
+        node = onnx.helper.make_node(
+            "Attention",
+            inputs=["Q", "K", "V", "attn_mask", "", "", "nonpad_kv_seqlen"],
+            outputs=["Y"],
+        )
+
+        Q = np.random.rand(2, 3, 4, 8).astype(np.float32)
+        K = np.random.rand(2, 3, 6, 8).astype(np.float32)
+        V = np.random.rand(2, 3, 6, 10).astype(np.float32)
+        attn_mask = np.random.rand(2, 3, 4, 4).astype(np.float32)
+        nonpad_kv_seqlen = np.array([3, 4], dtype=np.int64)
+
+        Y, _, _, _ = _compute_attention(
+            Q,
+            K,
+            V,
+            attn_mask=attn_mask,
+            nonpad_kv_seqlen=nonpad_kv_seqlen,
+        )
+
+        expect(
+            node,
+            inputs=[Q, K, V, attn_mask, nonpad_kv_seqlen],
+            outputs=[Y],
+            name="test_attention_4d_diff_heads_mask4d_padded_kv",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/averagepool.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/averagepool.py
new file mode 100644
index 0000000000000000000000000000000000000000..22b806a63f6dd9b2f424cb8c07caacc1d625a373
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/averagepool.py
@@ -0,0 +1,695 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.reference.ops.op_pool_common import (
+    get_output_shape_auto_pad,
+    get_output_shape_explicit_padding,
+    get_pad_shape,
+    pool,
+)
+
+
+class AveragePool(Base):
+    @staticmethod
+    def export_averagepool_2d_precomputed_pads() -> None:
+        """input_shape: [1, 1, 5, 5]
+        output_shape: [1, 1, 5, 5]
+        pad_shape: [4, 4] -> [2, 2, 2, 2] by axis
+        """
+        node = onnx.helper.make_node(
+            "AveragePool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[5, 5],
+            pads=[2, 2, 2, 2],
+        )
+        x = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4, 5],
+                        [6, 7, 8, 9, 10],
+                        [11, 12, 13, 14, 15],
+                        [16, 17, 18, 19, 20],
+                        [21, 22, 23, 24, 25],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+        y = np.array(
+            [
+                [
+                    [
+                        [7, 7.5, 8, 8.5, 9],
+                        [9.5, 10, 10.5, 11, 11.5],
+                        [12, 12.5, 13, 13.5, 14],
+                        [14.5, 15, 15.5, 16, 16.5],
+                        [17, 17.5, 18, 18.5, 19],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+
+        expect(
+            node, inputs=[x], outputs=[y], name="test_averagepool_2d_precomputed_pads"
+        )
+
+    @staticmethod
+    def export_averagepool_2d_precomputed_pads_count_include_pad() -> None:
+        """input_shape: [1, 1, 5, 5]
+        output_shape: [1, 1, 5, 5]
+        pad_shape: [4, 4] -> [2, 2, 2, 2] by axis
+        """
+        node = onnx.helper.make_node(
+            "AveragePool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[5, 5],
+            pads=[2, 2, 2, 2],
+            count_include_pad=1,
+        )
+        x = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4, 5],
+                        [6, 7, 8, 9, 10],
+                        [11, 12, 13, 14, 15],
+                        [16, 17, 18, 19, 20],
+                        [21, 22, 23, 24, 25],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+        y = np.array(
+            [
+                [
+                    [
+                        [2.5200, 3.6000, 4.8000, 4.0800, 3.2400],
+                        [4.5600, 6.4000, 8.4000, 7.0400, 5.5200],
+                        [7.2000, 10.0000, 13.0000, 10.8000, 8.4000],
+                        [6.9600, 9.6000, 12.4000, 10.2400, 7.9200],
+                        [6.1200, 8.4000, 10.8000, 8.8800, 6.8400],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[x],
+            outputs=[y],
+            name="test_averagepool_2d_precomputed_pads_count_include_pad",
+        )
+
+    @staticmethod
+    def export_averagepool_2d_precomputed_strides() -> None:
+        """input_shape: [1, 1, 5, 5]
+        output_shape: [1, 1, 2, 2]
+        """
+        node = onnx.helper.make_node(
+            "AveragePool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[2, 2],
+            strides=[2, 2],
+        )
+        x = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4, 5],
+                        [6, 7, 8, 9, 10],
+                        [11, 12, 13, 14, 15],
+                        [16, 17, 18, 19, 20],
+                        [21, 22, 23, 24, 25],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+        y = np.array([[[[4, 6], [14, 16]]]]).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[x],
+            outputs=[y],
+            name="test_averagepool_2d_precomputed_strides",
+        )
+
+    @staticmethod
+    def export_averagepool_2d_precomputed_same_upper() -> None:
+        """input_shape: [1, 1, 5, 5]
+        output_shape: [1, 1, 3, 3]
+        pad_shape: [2, 2] -> [1, 1, 1, 1] by axis
+        """
+        node = onnx.helper.make_node(
+            "AveragePool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[3, 3],
+            strides=[2, 2],
+            auto_pad="SAME_UPPER",
+        )
+        x = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4, 5],
+                        [6, 7, 8, 9, 10],
+                        [11, 12, 13, 14, 15],
+                        [16, 17, 18, 19, 20],
+                        [21, 22, 23, 24, 25],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+        y = np.array([[[[4, 5.5, 7], [11.5, 13, 14.5], [19, 20.5, 22]]]]).astype(
+            np.float32
+        )
+
+        expect(
+            node,
+            inputs=[x],
+            outputs=[y],
+            name="test_averagepool_2d_precomputed_same_upper",
+        )
+
+    @staticmethod
+    def export_averagepool_1d_default() -> None:
+        """input_shape: [1, 3, 32]
+        output_shape: [1, 3, 31]
+        """
+        node = onnx.helper.make_node(
+            "AveragePool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[2],
+        )
+        x = np.random.randn(1, 3, 32).astype(np.float32)
+        x_shape = np.shape(x)
+        pads = None
+        kernel_shape = [2]
+        strides = [1]
+        out_shape, _ = get_output_shape_explicit_padding(
+            pads, x_shape[2:], kernel_shape, strides
+        )
+        padded = x
+        y = pool(padded, x_shape, kernel_shape, strides, out_shape, "AVG")
+
+        expect(node, inputs=[x], outputs=[y], name="test_averagepool_1d_default")
+
+    @staticmethod
+    def export_averagepool_2d_default() -> None:
+        """input_shape: [1, 3, 32, 32]
+        output_shape: [1, 3, 31, 31]
+        """
+        node = onnx.helper.make_node(
+            "AveragePool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[2, 2],
+        )
+        x = np.random.randn(1, 3, 32, 32).astype(np.float32)
+        x_shape = np.shape(x)
+        pads = None
+        kernel_shape = (2, 2)
+        strides = (1, 1)
+        out_shape, _ = get_output_shape_explicit_padding(
+            pads, x_shape[2:], kernel_shape, strides
+        )
+        padded = x
+        y = pool(padded, x_shape, kernel_shape, strides, out_shape, "AVG")
+
+        expect(node, inputs=[x], outputs=[y], name="test_averagepool_2d_default")
+
+    @staticmethod
+    def export_averagepool_3d_default() -> None:
+        """input_shape: [1, 3, 32, 32, 32]
+        output_shape: [1, 3, 31, 31, 31]
+        """
+        node = onnx.helper.make_node(
+            "AveragePool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[2, 2, 2],
+        )
+        x = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)
+        x_shape = np.shape(x)
+        pads = None
+        kernel_shape = [2, 2, 2]
+        strides = [1, 1, 1]
+        out_shape, _ = get_output_shape_explicit_padding(
+            pads, x_shape[2:], kernel_shape, strides
+        )
+        padded = x
+        y = pool(padded, x_shape, kernel_shape, strides, out_shape, "AVG")
+
+        expect(node, inputs=[x], outputs=[y], name="test_averagepool_3d_default")
+
+    @staticmethod
+    def export_averagepool_2d_same_upper() -> None:
+        """input_shape: [1, 3, 32, 32]
+        output_shape: [1, 3, 32, 32]
+        pad_shape: [1, 1] -> [0, 1, 0, 1] by axis
+        """
+        node = onnx.helper.make_node(
+            "AveragePool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[2, 2],
+            auto_pad="SAME_UPPER",
+        )
+        x = np.random.randn(1, 3, 32, 32).astype(np.float32)
+        x_shape = np.shape(x)
+        kernel_shape = (2, 2)
+        strides = (1, 1)
+        out_shape = get_output_shape_auto_pad(
+            "SAME_UPPER", x_shape[2:], kernel_shape, strides
+        )
+        pad_shape = get_pad_shape(
+            "SAME_UPPER", x_shape[2:], kernel_shape, strides, out_shape
+        )
+        pad_top = pad_shape[0] // 2
+        pad_bottom = pad_shape[0] - pad_top
+        pad_left = pad_shape[1] // 2
+        pad_right = pad_shape[1] - pad_left
+        padded = np.pad(
+            x,
+            ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),
+            mode="constant",
+            constant_values=np.nan,
+        )
+        pads = (pad_top, pad_left, pad_bottom, pad_right)
+        y = pool(
+            padded,
+            x_shape,
+            kernel_shape,
+            strides,
+            out_shape,
+            "AVG",
+            pads_required=pads,
+            pads=pads,
+        )
+
+        expect(node, inputs=[x], outputs=[y], name="test_averagepool_2d_same_upper")
+
+    @staticmethod
+    def export_averagepool_2d_same_lower() -> None:
+        """input_shape: [1, 3, 32, 32]
+        output_shape: [1, 3, 32, 32]
+        pad_shape: [1, 1] -> [1, 0, 1, 0] by axis
+        """
+        node = onnx.helper.make_node(
+            "AveragePool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[2, 2],
+            auto_pad="SAME_LOWER",
+        )
+        x = np.random.randn(1, 3, 32, 32).astype(np.float32)
+        x_shape = np.shape(x)
+        kernel_shape = (2, 2)
+        strides = (1, 1)
+        out_shape = get_output_shape_auto_pad(
+            "SAME_LOWER", x_shape[2:], kernel_shape, strides
+        )
+        pad_shape = get_pad_shape(
+            "SAME_LOWER", x_shape[2:], kernel_shape, strides, out_shape
+        )
+        pad_bottom = pad_shape[0] // 2
+        pad_top = pad_shape[0] - pad_bottom
+        pad_right = pad_shape[1] // 2
+        pad_left = pad_shape[1] - pad_right
+        padded = np.pad(
+            x,
+            ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),
+            mode="constant",
+            constant_values=np.nan,
+        )
+        pads = (pad_top, pad_left, pad_bottom, pad_right)
+        y = pool(
+            padded,
+            x_shape,
+            kernel_shape,
+            strides,
+            out_shape,
+            "AVG",
+            pads_required=pads,
+            pads=pads,
+        )
+
+        expect(node, inputs=[x], outputs=[y], name="test_averagepool_2d_same_lower")
+
+    @staticmethod
+    def export_averagepool_2d_pads() -> None:
+        """input_shape: [1, 3, 28, 28]
+        output_shape: [1, 3, 30, 30]
+        pad_shape: [4, 4] -> [2, 2, 2, 2] by axis
+        """
+        node = onnx.helper.make_node(
+            "AveragePool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[3, 3],
+            pads=[2, 2, 2, 2],
+        )
+        x = np.random.randn(1, 3, 28, 28).astype(np.float32)
+        x_shape = np.shape(x)
+        kernel_shape = (3, 3)
+        strides = (1, 1)
+        pad_bottom = 2
+        pad_top = 2
+        pad_right = 2
+        pad_left = 2
+        pads = [pad_top, pad_left, pad_bottom, pad_right]
+        out_shape, extra_pads = get_output_shape_explicit_padding(
+            pads, x_shape[2:], kernel_shape, strides, ceil_mode=False
+        )
+        padded = np.pad(
+            x,
+            (
+                (0, 0),
+                (0, 0),
+                (extra_pads[0], extra_pads[2]),
+                (extra_pads[1], extra_pads[3]),
+            ),
+            mode="constant",
+            constant_values=np.nan,
+        )
+        y = pool(
+            padded,
+            x_shape,
+            kernel_shape,
+            strides,
+            out_shape,
+            "AVG",
+            pads_required=extra_pads,
+            pads=pads,
+        )
+
+        expect(node, inputs=[x], outputs=[y], name="test_averagepool_2d_pads")
+
+    @staticmethod
+    def export_averagepool_2d_pads_count_include_pad() -> None:
+        """input_shape: [1, 3, 28, 28]
+        output_shape: [1, 3, 30, 30]
+        pad_shape: [4, 4] -> [2, 2, 2, 2] by axis
+        """
+        node = onnx.helper.make_node(
+            "AveragePool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[3, 3],
+            pads=[2, 2, 2, 2],
+            count_include_pad=1,
+        )
+        x = np.random.randn(1, 3, 28, 28).astype(np.float32)
+        x_shape = np.shape(x)
+        dilations = (1, 1)
+        kernel_shape = (3, 3)
+        strides = (1, 1)
+        pad_bottom = 2
+        pad_top = 2
+        pad_right = 2
+        pad_left = 2
+        pads = [pad_top, pad_left, pad_bottom, pad_right]
+        out_shape, extra_pads = get_output_shape_explicit_padding(
+            pads, x_shape[2:], kernel_shape, strides, dilations, ceil_mode=False
+        )
+        padded = np.pad(
+            x,
+            (
+                (0, 0),
+                (0, 0),
+                (extra_pads[0], extra_pads[2]),
+                (extra_pads[1], extra_pads[3]),
+            ),
+            mode="constant",
+            constant_values=0,
+        )
+        y = pool(
+            padded,
+            x_shape,
+            kernel_shape,
+            strides,
+            out_shape,
+            "AVG",
+            pads_required=extra_pads,
+            pads=pads,
+            count_include_pad=1,
+        )
+
+        expect(
+            node,
+            inputs=[x],
+            outputs=[y],
+            name="test_averagepool_2d_pads_count_include_pad",
+        )
+
+    @staticmethod
+    def export_averagepool_2d_strides() -> None:
+        """input_shape: [1, 3, 32, 32]
+        output_shape: [1, 3, 10, 10]
+        """
+        node = onnx.helper.make_node(
+            "AveragePool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[5, 5],
+            strides=[3, 3],
+        )
+        x = np.random.randn(1, 3, 32, 32).astype(np.float32)
+        x_shape = np.shape(x)
+        kernel_shape = (5, 5)
+        strides = (3, 3)
+        out_shape, pads = get_output_shape_explicit_padding(
+            None, x_shape[2:], kernel_shape, strides, ceil_mode=False
+        )
+        padded = x
+        y = pool(
+            padded,
+            x_shape,
+            kernel_shape,
+            strides,
+            out_shape,
+            "AVG",
+            pads_required=pads,
+            pads=None,
+        )
+
+        expect(node, inputs=[x], outputs=[y], name="test_averagepool_2d_strides")
+
+    @staticmethod
+    def export_averagepool_2d_ceil() -> None:
+        """input_shape: [1, 1, 4, 4]
+        output_shape: [1, 1, 2, 2]
+        """
+        node = onnx.helper.make_node(
+            "AveragePool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[3, 3],
+            strides=[2, 2],
+            ceil_mode=True,
+        )
+        x = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+        y = np.array([[[[6, 7.5], [12, 13.5]]]]).astype(np.float32)
+
+        expect(node, inputs=[x], outputs=[y], name="test_averagepool_2d_ceil")
+
+    @staticmethod
+    def export_averagepool_2d_ceil_last_window_starts_on_pad() -> None:
+        """input_shape: [1, 3, 2, 2]
+        output_shape: [1, 3, 1, 1]
+        """
+        node = onnx.helper.make_node(
+            "AveragePool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[3, 3],
+            strides=[3, 3],
+            pads=[1, 1, 1, 1],
+            ceil_mode=True,
+            count_include_pad=1,
+        )
+        x = np.array(
+            [
+                [
+                    [[0.8580, 0.0786], [0.2692, 0.1537]],
+                    [[0.8816, 0.4353], [0.5772, 0.6623]],
+                    [[0.9067, 0.9483], [0.5970, 0.7630]],
+                ]
+            ]
+        ).astype(np.float32)
+        y = np.array([[[[0.1511]], [[0.2841]], [[0.3572]]]]).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[x],
+            outputs=[y],
+            name="test_averagepool_2d_ceil_last_window_starts_on_pad",
+        )
+
+    @staticmethod
+    def export_averagepool_2d_dilations() -> None:
+        """input_shape: [1, 1, 4, 4]
+        output_shape: [1, 1, 2, 2]
+        """
+        node = onnx.helper.make_node(
+            "AveragePool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[2, 2],
+            strides=[1, 1],
+            dilations=[2, 2],
+            ceil_mode=True,
+        )
+
+        # input shape: [1, 1, 4, 4]
+        x = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+
+        y = np.array([[[[6, 7], [10, 11]]]]).astype(np.float32)
+
+        expect(node, inputs=[x], outputs=[y], name="test_averagepool_2d_dilations")
+
+    @staticmethod
+    def export_averagepool_3d_dilations() -> None:
+        """input_shape: [1, 1, 4, 4]
+        output_shape: [1, 1, 2, 2]
+        """
+        node = onnx.helper.make_node(
+            "AveragePool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[2, 2, 2],
+            strides=[1, 1, 1],
+            dilations=[2, 2, 2],
+            ceil_mode=True,
+        )
+
+        # input shape: [1, 1, 4, 4, 4]
+        x = np.array(
+            [
+                [
+                    [
+                        [
+                            [1, 2, 3, 4],
+                            [5, 6, 7, 8],
+                            [9, 10, 11, 12],
+                            [13, 14, 15, 16],
+                        ],
+                        [
+                            [1, 2, 3, 4],
+                            [5, 6, 7, 8],
+                            [9, 10, 11, 12],
+                            [13, 14, 15, 16],
+                        ],
+                        [
+                            [1, 2, 3, 4],
+                            [5, 6, 7, 8],
+                            [9, 10, 11, 12],
+                            [13, 14, 15, 16],
+                        ],
+                        [
+                            [1, 2, 3, 4],
+                            [5, 6, 7, 8],
+                            [9, 10, 11, 12],
+                            [13, 14, 15, 16],
+                        ],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+
+        y = np.array([[[[[6, 7], [10, 11]], [[6, 7], [10, 11]]]]]).astype(np.float32)
+
+        expect(
+            node, inputs=[x], outputs=[y], name="test_averagepool_3d_dilations_small"
+        )
+
+    @staticmethod
+    def export_averagepool_3d_dilations_large() -> None:
+        x_shape = (32, 32, 32)
+        dilations = (2, 2, 2)
+        kernel_shape = (5, 5, 5)
+        strides = (3, 3, 3)
+        count_include_pad = 0
+
+        for count_include_pad in (0, 1):
+            for ceil_mode in (True, False):
+                node = onnx.helper.make_node(
+                    "AveragePool",
+                    inputs=["x"],
+                    outputs=["y"],
+                    kernel_shape=kernel_shape,
+                    strides=strides,
+                    dilations=dilations,
+                    count_include_pad=count_include_pad,
+                    ceil_mode=ceil_mode,
+                )
+
+                x = np.random.randn(1, 1, *x_shape).astype(np.float32)
+                out_shape, extra_pads = get_output_shape_explicit_padding(
+                    None,
+                    x_shape,
+                    kernel_shape,
+                    strides,
+                    dilations=dilations,
+                    ceil_mode=ceil_mode,
+                )
+                padded = np.pad(
+                    x,
+                    (
+                        (0, 0),
+                        (0, 0),
+                        (extra_pads[0], extra_pads[3]),
+                        (extra_pads[1], extra_pads[4]),
+                        (extra_pads[2], extra_pads[5]),
+                    ),
+                    mode="constant",
+                    constant_values=0 if count_include_pad == 1 else np.nan,
+                )
+                y = pool(
+                    padded,
+                    (1, 1, *x_shape),
+                    kernel_shape,
+                    strides,
+                    out_shape,
+                    "AVG",
+                    pads_required=extra_pads,
+                    pads=None,
+                    dilations=dilations,
+                    count_include_pad=count_include_pad,
+                )
+
+                test_name = f"test_averagepool_3d_dilations_large_count_include_pad_is_{count_include_pad}_ceil_mode_is_{ceil_mode}"
+                expect(node, inputs=[x], outputs=[y], name=test_name)
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/batchnorm.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/batchnorm.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4b05fe9eaa36c3c54803ed479578b363968334a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/batchnorm.py
@@ -0,0 +1,137 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def _batchnorm_test_mode(x, s, bias, mean, var, epsilon=1e-5):
+    dims_x = len(x.shape)
+    dim_ones = (1,) * (dims_x - 2)
+    s = s.reshape(-1, *dim_ones)
+    bias = bias.reshape(-1, *dim_ones)
+    mean = mean.reshape(-1, *dim_ones)
+    var = var.reshape(-1, *dim_ones)
+    return s * (x - mean) / np.sqrt(var + epsilon) + bias
+
+
+def _batchnorm_training_mode(x, s, bias, mean, var, momentum=0.9, epsilon=1e-5):
+    axis = tuple(np.delete(np.arange(len(x.shape)), 1))
+    saved_mean = x.mean(axis=axis)
+    saved_var = x.var(axis=axis)
+    output_mean = mean * momentum + saved_mean * (1 - momentum)
+    output_var = var * momentum + saved_var * (1 - momentum)
+    y = _batchnorm_test_mode(x, s, bias, saved_mean, saved_var, epsilon=epsilon)
+    return y.astype(np.float32), output_mean, output_var
+
+
+class BatchNormalization(Base):
+    @staticmethod
+    def export() -> None:
+        # input size: (2, 3, 4, 5)
+        x = np.random.randn(2, 3, 4, 5).astype(np.float32)
+        s = np.random.randn(3).astype(np.float32)
+        bias = np.random.randn(3).astype(np.float32)
+        mean = np.random.randn(3).astype(np.float32)
+        var = np.random.rand(3).astype(np.float32)
+        y = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)
+
+        node = onnx.helper.make_node(
+            "BatchNormalization",
+            inputs=["x", "s", "bias", "mean", "var"],
+            outputs=["y"],
+        )
+
+        # output size: (2, 3, 4, 5)
+        expect(
+            node,
+            inputs=[x, s, bias, mean, var],
+            outputs=[y],
+            name="test_batchnorm_example",
+        )
+
+        # input size: (2, 3, 4, 5)
+        x = np.random.randn(2, 3, 4, 5).astype(np.float32)
+        s = np.random.randn(3).astype(np.float32)
+        bias = np.random.randn(3).astype(np.float32)
+        mean = np.random.randn(3).astype(np.float32)
+        var = np.random.rand(3).astype(np.float32)
+        epsilon = 1e-2
+        y = _batchnorm_test_mode(x, s, bias, mean, var, epsilon).astype(np.float32)
+
+        node = onnx.helper.make_node(
+            "BatchNormalization",
+            inputs=["x", "s", "bias", "mean", "var"],
+            outputs=["y"],
+            epsilon=epsilon,
+        )
+
+        # output size: (2, 3, 4, 5)
+        expect(
+            node,
+            inputs=[x, s, bias, mean, var],
+            outputs=[y],
+            name="test_batchnorm_epsilon",
+        )
+
+    @staticmethod
+    def export_train() -> None:
+        # input size: (2, 3, 4, 5)
+        x = np.random.randn(2, 3, 4, 5).astype(np.float32)
+        s = np.random.randn(3).astype(np.float32)
+        bias = np.random.randn(3).astype(np.float32)
+        mean = np.random.randn(3).astype(np.float32)
+        var = np.random.rand(3).astype(np.float32)
+        # using np.bool(1) while generating test data with "'bool' object has no attribute 'dtype'"
+        # working around by using np.byte(1).astype(bool)
+        training_mode = 1
+        y, output_mean, output_var = _batchnorm_training_mode(x, s, bias, mean, var)
+
+        node = onnx.helper.make_node(
+            "BatchNormalization",
+            inputs=["x", "s", "bias", "mean", "var"],
+            outputs=["y", "output_mean", "output_var"],
+            training_mode=training_mode,
+        )
+
+        # output size: (2, 3, 4, 5)
+        expect(
+            node,
+            inputs=[x, s, bias, mean, var],
+            outputs=[y, output_mean, output_var],
+            name="test_batchnorm_example_training_mode",
+        )
+
+        # input size: (2, 3, 4, 5)
+        x = np.random.randn(2, 3, 4, 5).astype(np.float32)
+        s = np.random.randn(3).astype(np.float32)
+        bias = np.random.randn(3).astype(np.float32)
+        mean = np.random.randn(3).astype(np.float32)
+        var = np.random.rand(3).astype(np.float32)
+        training_mode = 1
+        momentum = 0.9
+        epsilon = 1e-2
+        y, output_mean, output_var = _batchnorm_training_mode(
+            x, s, bias, mean, var, momentum, epsilon
+        )
+
+        node = onnx.helper.make_node(
+            "BatchNormalization",
+            inputs=["x", "s", "bias", "mean", "var"],
+            outputs=["y", "output_mean", "output_var"],
+            epsilon=epsilon,
+            training_mode=training_mode,
+        )
+
+        # output size: (2, 3, 4, 5)
+        expect(
+            node,
+            inputs=[x, s, bias, mean, var],
+            outputs=[y, output_mean, output_var],
+            name="test_batchnorm_epsilon_training_mode",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bernoulli.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bernoulli.py
new file mode 100644
index 0000000000000000000000000000000000000000..b52e51682e644493635f90a120cc7c1e4225faed
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bernoulli.py
@@ -0,0 +1,59 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def bernoulli_reference_implementation(x, dtype):
+    # binomial n = 1 equal bernoulli
+    # This example and test-case is for informational purpose. The generator operator is
+    # non-deterministic and may not produce the same values in different implementations
+    # even if a seed is specified.
+    return np.random.binomial(1, p=x).astype(dtype)
+
+
+class Bernoulli(Base):
+    @staticmethod
+    def export_bernoulli_without_dtype() -> None:
+        node = onnx.helper.make_node(
+            "Bernoulli",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.random.uniform(0.0, 1.0, 10).astype(float)
+        y = bernoulli_reference_implementation(x, float)
+        expect(node, inputs=[x], outputs=[y], name="test_bernoulli")
+
+    @staticmethod
+    def export_bernoulli_with_dtype() -> None:
+        node = onnx.helper.make_node(
+            "Bernoulli",
+            inputs=["x"],
+            outputs=["y"],
+            dtype=onnx.TensorProto.DOUBLE,
+        )
+
+        x = np.random.uniform(0.0, 1.0, 10).astype(np.float32)
+        y = bernoulli_reference_implementation(x, float)
+        expect(node, inputs=[x], outputs=[y], name="test_bernoulli_double")
+
+    @staticmethod
+    def export_bernoulli_with_seed() -> None:
+        seed = float(0)
+        node = onnx.helper.make_node(
+            "Bernoulli",
+            inputs=["x"],
+            outputs=["y"],
+            seed=seed,
+        )
+
+        x = np.random.uniform(0.0, 1.0, 10).astype(np.float32)
+        y = bernoulli_reference_implementation(x, np.float32)
+        expect(node, inputs=[x], outputs=[y], name="test_bernoulli_seed")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bitshift.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bitshift.py
new file mode 100644
index 0000000000000000000000000000000000000000..372007394feda15c90abadaccdcfdde6666bfc0c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bitshift.py
@@ -0,0 +1,100 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class BitShift(Base):
+    @staticmethod
+    def export_right_unit8() -> None:
+        node = onnx.helper.make_node(
+            "BitShift", inputs=["x", "y"], outputs=["z"], direction="RIGHT"
+        )
+
+        x = np.array([16, 4, 1]).astype(np.uint8)
+        y = np.array([1, 2, 3]).astype(np.uint8)
+        z = x >> y  # expected output [8, 1, 0]
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitshift_right_uint8")
+
+    @staticmethod
+    def export_right_unit16() -> None:
+        node = onnx.helper.make_node(
+            "BitShift", inputs=["x", "y"], outputs=["z"], direction="RIGHT"
+        )
+
+        x = np.array([16, 4, 1]).astype(np.uint16)
+        y = np.array([1, 2, 3]).astype(np.uint16)
+        z = x >> y  # expected output [8, 1, 0]
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitshift_right_uint16")
+
+    @staticmethod
+    def export_right_unit32() -> None:
+        node = onnx.helper.make_node(
+            "BitShift", inputs=["x", "y"], outputs=["z"], direction="RIGHT"
+        )
+
+        x = np.array([16, 4, 1]).astype(np.uint32)
+        y = np.array([1, 2, 3]).astype(np.uint32)
+        z = x >> y  # expected output [8, 1, 0]
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitshift_right_uint32")
+
+    @staticmethod
+    def export_right_unit64() -> None:
+        node = onnx.helper.make_node(
+            "BitShift", inputs=["x", "y"], outputs=["z"], direction="RIGHT"
+        )
+
+        x = np.array([16, 4, 1]).astype(np.uint64)
+        y = np.array([1, 2, 3]).astype(np.uint64)
+        z = x >> y  # expected output [8, 1, 0]
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitshift_right_uint64")
+
+    @staticmethod
+    def export_left_unit8() -> None:
+        node = onnx.helper.make_node(
+            "BitShift", inputs=["x", "y"], outputs=["z"], direction="LEFT"
+        )
+
+        x = np.array([16, 4, 1]).astype(np.uint8)
+        y = np.array([1, 2, 3]).astype(np.uint8)
+        z = x << y  # expected output [32, 16, 8]
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitshift_left_uint8")
+
+    @staticmethod
+    def export_left_unit16() -> None:
+        node = onnx.helper.make_node(
+            "BitShift", inputs=["x", "y"], outputs=["z"], direction="LEFT"
+        )
+
+        x = np.array([16, 4, 1]).astype(np.uint16)
+        y = np.array([1, 2, 3]).astype(np.uint16)
+        z = x << y  # expected output [32, 16, 8]
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitshift_left_uint16")
+
+    @staticmethod
+    def export_left_unit32() -> None:
+        node = onnx.helper.make_node(
+            "BitShift", inputs=["x", "y"], outputs=["z"], direction="LEFT"
+        )
+
+        x = np.array([16, 4, 1]).astype(np.uint32)
+        y = np.array([1, 2, 3]).astype(np.uint32)
+        z = x << y  # expected output [32, 16, 8]
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitshift_left_uint32")
+
+    @staticmethod
+    def export_left_unit64() -> None:
+        node = onnx.helper.make_node(
+            "BitShift", inputs=["x", "y"], outputs=["z"], direction="LEFT"
+        )
+
+        x = np.array([16, 4, 1]).astype(np.uint64)
+        y = np.array([1, 2, 3]).astype(np.uint64)
+        z = x << y  # expected output [32, 16, 8]
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitshift_left_uint64")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bitwiseand.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bitwiseand.py
new file mode 100644
index 0000000000000000000000000000000000000000..271ad9e280c00701abfa7177c633a9b21bf9aaf5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bitwiseand.py
@@ -0,0 +1,55 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.numpy_helper import create_random_int
+
+
+class BitwiseAnd(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "BitwiseAnd",
+            inputs=["x", "y"],
+            outputs=["bitwiseand"],
+        )
+
+        # 2d
+        x = create_random_int((3, 4), np.int32)
+        y = create_random_int((3, 4), np.int32)
+        z = np.bitwise_and(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_and_i32_2d")
+
+        # 3d
+        x = create_random_int((3, 4, 5), np.int16)
+        y = create_random_int((3, 4, 5), np.int16)
+        z = np.bitwise_and(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_and_i16_3d")
+
+    @staticmethod
+    def export_bitwiseand_broadcast() -> None:
+        node = onnx.helper.make_node(
+            "BitwiseAnd",
+            inputs=["x", "y"],
+            outputs=["bitwiseand"],
+        )
+
+        # 3d vs 1d
+        x = create_random_int((3, 4, 5), np.uint64)
+        y = create_random_int((5,), np.uint64)
+        z = np.bitwise_and(x, y)
+        expect(
+            node, inputs=[x, y], outputs=[z], name="test_bitwise_and_ui64_bcast_3v1d"
+        )
+
+        # 4d vs 3d
+        x = create_random_int((3, 4, 5, 6), np.uint8)
+        y = create_random_int((4, 5, 6), np.uint8)
+        z = np.bitwise_and(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_and_ui8_bcast_4v3d")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bitwisenot.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bitwisenot.py
new file mode 100644
index 0000000000000000000000000000000000000000..28375830c40ffdf404066a121fead84639c5fe35
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bitwisenot.py
@@ -0,0 +1,36 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.numpy_helper import create_random_int
+
+
+class BitwiseNot(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "BitwiseNot",
+            inputs=["x"],
+            outputs=["bitwise_not"],
+        )
+
+        # 2d
+        x = create_random_int((3, 4), np.int32)
+        y = np.bitwise_not(x)
+        expect(node, inputs=[x], outputs=[y], name="test_bitwise_not_2d")
+
+        # 3d
+        x = create_random_int((3, 4, 5), np.uint16)
+        y = np.bitwise_not(x)
+        expect(node, inputs=[x], outputs=[y], name="test_bitwise_not_3d")
+
+        # 4d
+        x = create_random_int((3, 4, 5, 6), np.uint8)
+        y = np.bitwise_not(x)
+        expect(node, inputs=[x], outputs=[y], name="test_bitwise_not_4d")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bitwiseor.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bitwiseor.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa1d82acb96d942bdfd945464901d8de2bf0d910
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bitwiseor.py
@@ -0,0 +1,52 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.numpy_helper import create_random_int
+
+
+class BitwiseOr(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "BitwiseOr",
+            inputs=["x", "y"],
+            outputs=["bitwiseor"],
+        )
+        # 2d
+        x = create_random_int((3, 4), np.int32)
+        y = create_random_int((3, 4), np.int32)
+        z = np.bitwise_or(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_or_i32_2d")
+
+        # 4d
+        x = create_random_int((3, 4, 5, 6), np.int8)
+        y = create_random_int((3, 4, 5, 6), np.int8)
+        z = np.bitwise_or(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_or_i16_4d")
+
+    @staticmethod
+    def export_bitwiseor_broadcast() -> None:
+        node = onnx.helper.make_node(
+            "BitwiseOr",
+            inputs=["x", "y"],
+            outputs=["bitwiseor"],
+        )
+
+        # 3d vs 1d
+        x = create_random_int((3, 4, 5), np.uint64)
+        y = create_random_int((5,), np.uint64)
+        z = np.bitwise_or(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_or_ui64_bcast_3v1d")
+
+        # 4d vs 3d
+        x = create_random_int((3, 4, 5, 6), np.uint8)
+        y = create_random_int((4, 5, 6), np.uint8)
+        z = np.bitwise_or(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_or_ui8_bcast_4v3d")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bitwisexor.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bitwisexor.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbbd98ee5fd9dd548d634da30a5b89f26d547a47
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/bitwisexor.py
@@ -0,0 +1,55 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.numpy_helper import create_random_int
+
+
+class BitwiseXor(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "BitwiseXor",
+            inputs=["x", "y"],
+            outputs=["bitwisexor"],
+        )
+
+        # 2d
+        x = create_random_int((3, 4), np.int32)
+        y = create_random_int((3, 4), np.int32)
+        z = np.bitwise_xor(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_xor_i32_2d")
+
+        # 3d
+        x = create_random_int((3, 4, 5), np.int16)
+        y = create_random_int((3, 4, 5), np.int16)
+        z = np.bitwise_xor(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_xor_i16_3d")
+
+    @staticmethod
+    def export_bitwiseor_broadcast() -> None:
+        node = onnx.helper.make_node(
+            "BitwiseXor",
+            inputs=["x", "y"],
+            outputs=["bitwisexor"],
+        )
+
+        # 3d vs 1d
+        x = create_random_int((3, 4, 5), np.uint64)
+        y = create_random_int((5,), np.uint64)
+        z = np.bitwise_xor(x, y)
+        expect(
+            node, inputs=[x, y], outputs=[z], name="test_bitwise_xor_ui64_bcast_3v1d"
+        )
+
+        # 4d vs 3d
+        x = create_random_int((3, 4, 5, 6), np.uint8)
+        y = create_random_int((4, 5, 6), np.uint8)
+        z = np.bitwise_xor(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_bitwise_xor_ui8_bcast_4v3d")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/blackmanwindow.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/blackmanwindow.py
new file mode 100644
index 0000000000000000000000000000000000000000..13f92d62da16a071f133cceb819308ed9a20b943
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/blackmanwindow.py
@@ -0,0 +1,56 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class BlackmanWindow(Base):
+    @staticmethod
+    def export() -> None:
+        # Test periodic window
+        node = onnx.helper.make_node(
+            "BlackmanWindow",
+            inputs=["x"],
+            outputs=["y"],
+        )
+        size = np.int32(10)
+        a0 = 0.42
+        a1 = -0.5
+        a2 = 0.08
+        y = a0
+        y += a1 * np.cos(2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / size)
+        y += a2 * np.cos(4 * np.pi * np.arange(0, size, 1, dtype=np.float32) / size)
+        expect(
+            node,
+            inputs=[size],
+            outputs=[y.astype(np.float32)],
+            name="test_blackmanwindow",
+        )
+
+        # Test symmetric window
+        node = onnx.helper.make_node(
+            "BlackmanWindow", inputs=["x"], outputs=["y"], periodic=0
+        )
+        size = np.int32(10)
+        a0 = 0.42
+        a1 = -0.5
+        a2 = 0.08
+        y = a0
+        y += a1 * np.cos(
+            2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / (size - 1)
+        )
+        y += a2 * np.cos(
+            4 * np.pi * np.arange(0, size, 1, dtype=np.float32) / (size - 1)
+        )
+        expect(
+            node,
+            inputs=[size],
+            outputs=[y.astype(np.float32)],
+            name="test_blackmanwindow_symmetric",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/cast.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/cast.py
new file mode 100644
index 0000000000000000000000000000000000000000..621058b95877264f1841edc3ddf035f2e00f7c75
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/cast.py
@@ -0,0 +1,369 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import itertools
+
+import numpy as np
+
+import onnx
+from onnx import TensorProto
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.helper import (
+    make_tensor,
+    tensor_dtype_to_np_dtype,
+)
+from onnx.numpy_helper import to_float8e8m0
+
+F8_TYPES = frozenset({"FLOAT8E4M3FN", "FLOAT8E4M3FNUZ", "FLOAT8E5M2", "FLOAT8E5M2FNUZ"})
+FOUR_BIT_TYPES = frozenset({"UINT4", "INT4", "FLOAT4E2M1"})
+
+
+class Cast(Base):
+    @staticmethod
+    def export() -> None:
+        test_cases = [
+            ("FLOAT", "FLOAT16"),
+            ("FLOAT", "DOUBLE"),
+            ("FLOAT16", "FLOAT"),
+            ("FLOAT16", "DOUBLE"),
+            ("DOUBLE", "FLOAT"),
+            ("DOUBLE", "FLOAT16"),
+            ("FLOAT", "BFLOAT16"),
+            ("BFLOAT16", "FLOAT"),
+            ("FLOAT", "FLOAT8E4M3FN"),
+            ("FLOAT16", "FLOAT8E4M3FN"),
+            ("FLOAT", "FLOAT8E4M3FNUZ"),
+            ("FLOAT16", "FLOAT8E4M3FNUZ"),
+            ("FLOAT8E4M3FN", "FLOAT"),
+            ("FLOAT8E4M3FN", "FLOAT16"),
+            ("FLOAT8E4M3FNUZ", "FLOAT"),
+            ("FLOAT8E4M3FNUZ", "FLOAT16"),
+            ("FLOAT", "FLOAT8E5M2"),
+            ("FLOAT16", "FLOAT8E5M2"),
+            ("FLOAT", "FLOAT8E5M2FNUZ"),
+            ("FLOAT16", "FLOAT8E5M2FNUZ"),
+            ("FLOAT8E5M2", "FLOAT"),
+            ("FLOAT8E5M2", "FLOAT16"),
+            ("FLOAT8E5M2FNUZ", "FLOAT"),
+            ("FLOAT8E5M2FNUZ", "FLOAT16"),
+            ("FLOAT", "UINT4"),
+            ("FLOAT16", "UINT4"),
+            ("FLOAT", "INT4"),
+            ("FLOAT16", "INT4"),
+            ("UINT4", "FLOAT"),
+            ("UINT4", "FLOAT16"),
+            ("UINT4", "UINT8"),
+            ("INT4", "FLOAT"),
+            ("INT4", "FLOAT16"),
+            ("INT4", "INT8"),
+            ("FLOAT4E2M1", "FLOAT"),
+            ("FLOAT4E2M1", "FLOAT16"),
+            ("FLOAT", "FLOAT4E2M1"),
+            ("FLOAT16", "FLOAT4E2M1"),
+        ]
+
+        for from_type, to_type in test_cases:
+            if from_type == to_type:
+                # Skip cases where from_type and to_type are the same
+                continue
+            from_dtype = getattr(TensorProto, from_type)
+            to_dtype = getattr(TensorProto, to_type)
+            from_np_dtype = tensor_dtype_to_np_dtype(from_dtype)
+            to_np_dtype = tensor_dtype_to_np_dtype(to_dtype)
+
+            if from_type == "BFLOAT16" or to_type == "BFLOAT16":
+                np_fp32 = np.array(
+                    [
+                        "0.47892547",
+                        "0.48033667",
+                        "0.49968487",
+                        "0.81910545",
+                        "0.47031248",
+                        "0.816468",
+                        "0.21087195",
+                        "0.7229038",
+                        "NaN",
+                        "INF",
+                        "+INF",
+                        "-INF",
+                    ],
+                    dtype=np.float32,
+                )
+                input_shape = (3, 4)
+
+            elif from_type in F8_TYPES or to_type in F8_TYPES:
+                np_fp32 = np.array(
+                    [
+                        "0.47892547",
+                        "0.48033667",
+                        "0.49968487",
+                        "0.81910545",
+                        "0.47031248",
+                        "0.7229038",
+                        "1000000",
+                        "1e-7",
+                        "NaN",
+                        "INF",
+                        "+INF",
+                        "-INF",
+                        "-0.0000001",
+                        "0.0000001",
+                        "-1000000",
+                    ],
+                    dtype=np.float32,
+                )
+                input_shape = (3, 5)
+            elif from_type in ("UINT4", "INT4") or to_type in ("UINT4", "INT4"):
+                np_fp32 = np.arange(-9, 16).astype(np.float32)
+                input_shape = (5, 5)
+            elif from_type == "FLOAT4E2M1" or to_type == "FLOAT4E2M1":
+                np_fp32 = np.array(
+                    [
+                        "0.48",
+                        "0.25",
+                        "1.05",
+                        "-3.5",
+                        "-8",
+                        "9",
+                        "1000000",
+                        "1e-7",
+                        "NaN",
+                        "INF",
+                        "+INF",
+                        "-INF",
+                        "-4",
+                        "0.01",
+                        "-0.0",
+                    ],
+                    dtype=np.float32,
+                )
+                input_shape = (3, 5)
+
+            else:
+                np_fp32 = np.array(
+                    [
+                        "0.47892547",
+                        "0.48033667",
+                        "0.49968487",
+                        "0.81910545",
+                        "0.47031248",
+                        "0.816468",
+                        "0.21087195",
+                        "0.7229038",
+                        "NaN",
+                        "INF",
+                        "+INF",
+                        "-INF",
+                    ],
+                    dtype=np.float32,
+                ).reshape([3, 4])
+                input_shape = (3, 4)
+
+            if from_type in F8_TYPES:
+                np_from = onnx.numpy_helper.saturate_cast(np_fp32, from_np_dtype)
+                input = make_tensor(
+                    "input",
+                    from_dtype,
+                    input_shape,
+                    vals=np_from,
+                    raw=True,
+                )
+            elif from_type in FOUR_BIT_TYPES:
+                np_from = np_fp32.astype(from_np_dtype)
+                packed = onnx.numpy_helper._pack_4bitx2(np_from)
+                input = make_tensor(
+                    "input", from_dtype, input_shape, vals=packed.tobytes(), raw=True
+                )
+            else:
+                np_from = np_fp32.astype(from_np_dtype)
+                input = make_tensor(
+                    "input", from_dtype, input_shape, vals=np_from, raw=True
+                )
+
+            if to_type in F8_TYPES:
+                output = make_tensor(
+                    "output",
+                    to_dtype,
+                    input_shape,
+                    vals=onnx.numpy_helper.saturate_cast(np_from, to_np_dtype),
+                    raw=True,
+                )
+            elif to_type in FOUR_BIT_TYPES:
+                packed = onnx.numpy_helper._pack_4bitx2(np_from.astype(to_np_dtype))
+                output = make_tensor(
+                    "output", to_dtype, input_shape, vals=packed.tobytes(), raw=True
+                )
+            else:
+                output = make_tensor(
+                    "output",
+                    to_dtype,
+                    input_shape,
+                    vals=np_from.astype(to_np_dtype),
+                    raw=True,
+                )
+
+            node = onnx.helper.make_node(
+                "Cast",
+                inputs=["input"],
+                outputs=["output"],
+                to=to_dtype,
+            )
+            expect(
+                node,
+                inputs=[input],
+                outputs=[output],
+                name="test_cast_" + from_type + "_to_" + to_type,
+            )
+
+    @staticmethod
+    def export_saturate_false() -> None:
+        test_cases = itertools.product(
+            [
+                "FLOAT",
+                "FLOAT16",
+            ],
+            [
+                "FLOAT8E4M3FN",
+                "FLOAT8E4M3FNUZ",
+                "FLOAT8E5M2",
+                "FLOAT8E5M2FNUZ",
+            ],
+        )
+        input_shape = (3, 5)
+        for from_type, to_type in test_cases:
+            from_dtype = getattr(TensorProto, from_type)
+            to_dtype = getattr(TensorProto, to_type)
+            from_np_dtype = tensor_dtype_to_np_dtype(from_dtype)
+            to_np_dtype = tensor_dtype_to_np_dtype(to_dtype)
+            np_fp32 = np.array(
+                [
+                    "0.47892547",
+                    "0.48033667",
+                    "0.49968487",
+                    "0.81910545",
+                    "0.47031248",
+                    "0.7229038",
+                    "1000000",
+                    "1e-7",
+                    "NaN",
+                    "INF",
+                    "+INF",
+                    "-INF",
+                    "-0.0000001",
+                    "0.0000001",
+                    "-1000000",
+                ],
+                dtype=np.float32,
+            )
+
+            input = make_tensor(
+                "input",
+                from_dtype,
+                input_shape,
+                vals=np_fp32.astype(from_np_dtype),
+                raw=True,
+            )
+            output = make_tensor(
+                "output",
+                to_dtype,
+                input_shape,
+                vals=np_fp32.astype(from_np_dtype).astype(to_np_dtype),
+                raw=True,
+            )
+
+            node = onnx.helper.make_node(
+                "Cast",
+                inputs=["input"],
+                outputs=["output"],
+                to=to_dtype,
+                saturate=0,
+            )
+            expect(
+                node,
+                inputs=[input],
+                outputs=[output],
+                name="test_cast_no_saturate_" + from_type + "_to_" + to_type,
+            )
+
+    @staticmethod
+    def export_e8m0() -> None:
+        np_fp32 = np.array(
+            [
+                "0.0",
+                "0.124",
+                "0.25",
+                "0.5",
+                "1.1",
+                "2.0",
+                "4.0",
+                "8.0",
+            ],
+            dtype=np.float32,
+        )
+        test_cases = [
+            ("FLOAT", "FLOAT8E8M0"),
+            ("FLOAT16", "FLOAT8E8M0"),
+            ("FLOAT8E8M0", "FLOAT"),
+            ("FLOAT8E8M0", "FLOAT16"),
+        ]
+        for from_type, to_type in test_cases:
+            if from_type == "FLOAT":
+                input_np = np_fp32
+                output_np = to_float8e8m0(np_fp32)
+            elif from_type == "FLOAT16":
+                input_np = np_fp32.astype(np.float16)
+                output_np = to_float8e8m0(input_np)
+            elif from_type == "FLOAT8E8M0":
+                input_np = to_float8e8m0(np_fp32)
+                if to_type == "FLOAT":
+                    output_np = input_np.astype(np.float32)
+                elif to_type == "FLOAT16":
+                    output_np = input_np.astype(np.float16)
+                else:
+                    raise ValueError(
+                        f"Conversion from {from_type} to {to_type} is not tested."
+                    )
+            else:
+                raise ValueError(
+                    f"Conversion from {from_type} to {to_type} is not tested."
+                )
+            input = make_tensor(
+                "input",
+                getattr(TensorProto, from_type),
+                [2, 4],
+                input_np.tobytes(),
+                raw=True,
+            )
+            output = make_tensor(
+                "output",
+                getattr(TensorProto, to_type),
+                [2, 4],
+                output_np.tobytes(),
+                raw=True,
+            )
+            if to_type == "FLOAT8E8M0":
+                node = onnx.helper.make_node(
+                    "Cast",
+                    inputs=["input"],
+                    outputs=["output"],
+                    to=getattr(TensorProto, to_type),
+                    saturate=1,
+                    round_mode="up",
+                )
+            else:
+                node = onnx.helper.make_node(
+                    "Cast",
+                    inputs=["input"],
+                    outputs=["output"],
+                    to=getattr(TensorProto, to_type),
+                )
+
+            expect(
+                node,
+                inputs=[input],
+                outputs=[output],
+                name="test_cast_e8m0_" + from_type + "_to_" + to_type,
+            )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/castlike.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/castlike.py
new file mode 100644
index 0000000000000000000000000000000000000000..749c8304b240a3fdf8459703f57f322160b350b3
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/castlike.py
@@ -0,0 +1,291 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import itertools
+
+import numpy as np
+
+import onnx
+from onnx import TensorProto
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.helper import make_tensor, tensor_dtype_to_np_dtype
+
+F8_TYPES = frozenset({"FLOAT8E4M3FN", "FLOAT8E4M3FNUZ", "FLOAT8E5M2", "FLOAT8E5M2FNUZ"})
+FOUR_BIT_TYPES = frozenset({"UINT4", "INT4", "FLOAT4E2M1"})
+
+
+class CastLike(Base):
+    @staticmethod
+    def export() -> None:
+        test_cases = [
+            ("FLOAT", "FLOAT16"),
+            ("FLOAT", "DOUBLE"),
+            ("FLOAT16", "FLOAT"),
+            ("FLOAT16", "DOUBLE"),
+            ("DOUBLE", "FLOAT"),
+            ("DOUBLE", "FLOAT16"),
+            ("FLOAT", "BFLOAT16"),
+            ("BFLOAT16", "FLOAT"),
+            ("FLOAT", "FLOAT8E4M3FN"),
+            ("FLOAT16", "FLOAT8E4M3FN"),
+            ("FLOAT", "FLOAT8E4M3FNUZ"),
+            ("FLOAT16", "FLOAT8E4M3FNUZ"),
+            ("FLOAT8E4M3FN", "FLOAT"),
+            ("FLOAT8E4M3FN", "FLOAT16"),
+            ("FLOAT8E4M3FNUZ", "FLOAT"),
+            ("FLOAT8E4M3FNUZ", "FLOAT16"),
+            ("FLOAT", "FLOAT8E5M2"),
+            ("FLOAT16", "FLOAT8E5M2"),
+            ("FLOAT", "FLOAT8E5M2FNUZ"),
+            ("FLOAT16", "FLOAT8E5M2FNUZ"),
+            ("FLOAT8E5M2", "FLOAT"),
+            ("FLOAT8E5M2", "FLOAT16"),
+            ("FLOAT8E5M2FNUZ", "FLOAT"),
+            ("FLOAT8E5M2FNUZ", "FLOAT16"),
+            ("FLOAT", "UINT4"),
+            ("FLOAT16", "UINT4"),
+            ("FLOAT", "INT4"),
+            ("FLOAT16", "INT4"),
+            ("UINT4", "FLOAT"),
+            ("UINT4", "FLOAT16"),
+            ("UINT4", "UINT8"),
+            ("INT4", "FLOAT"),
+            ("INT4", "FLOAT16"),
+            ("INT4", "INT8"),
+            ("FLOAT4E2M1", "FLOAT"),
+            ("FLOAT4E2M1", "FLOAT16"),
+            ("FLOAT", "FLOAT4E2M1"),
+            ("FLOAT16", "FLOAT4E2M1"),
+        ]
+
+        f8_types = {"FLOAT8E4M3FN", "FLOAT8E4M3FNUZ", "FLOAT8E5M2", "FLOAT8E5M2FNUZ"}
+
+        for from_type, to_type in test_cases:
+            if from_type == to_type:
+                # Skip cases where from_type and to_type are the same
+                continue
+            from_dtype = getattr(TensorProto, from_type)
+            to_dtype = getattr(TensorProto, to_type)
+            from_np_dtype = tensor_dtype_to_np_dtype(from_dtype)
+            to_np_dtype = tensor_dtype_to_np_dtype(to_dtype)
+
+            if from_type == "BFLOAT16" or to_type == "BFLOAT16":
+                np_fp32 = np.array(
+                    [
+                        "0.47892547",
+                        "0.48033667",
+                        "0.49968487",
+                        "0.81910545",
+                        "0.47031248",
+                        "0.816468",
+                        "0.21087195",
+                        "0.7229038",
+                        "NaN",
+                        "INF",
+                        "+INF",
+                        "-INF",
+                    ],
+                    dtype=np.float32,
+                )
+                input_shape = (3, 4)
+
+            elif from_type in f8_types or to_type in f8_types:
+                np_fp32 = np.array(
+                    [
+                        "0.47892547",
+                        "0.48033667",
+                        "0.49968487",
+                        "0.81910545",
+                        "0.47031248",
+                        "0.7229038",
+                        "1000000",
+                        "1e-7",
+                        "NaN",
+                        "INF",
+                        "+INF",
+                        "-INF",
+                        "-0.0000001",
+                        "0.0000001",
+                        "-1000000",
+                    ],
+                    dtype=np.float32,
+                )
+                input_shape = (3, 5)
+            elif from_type in ("UINT4", "INT4") or to_type in ("UINT4", "INT4"):
+                np_fp32 = np.arange(-9, 16).astype(np.float32)
+                input_shape = (5, 5)
+            elif from_type == "FLOAT4E2M1" or to_type == "FLOAT4E2M1":
+                np_fp32 = np.array(
+                    [
+                        "0.48",
+                        "0.25",
+                        "1.05",
+                        "-3.5",
+                        "-8",
+                        "9",
+                        "1000000",
+                        "1e-7",
+                        "NaN",
+                        "INF",
+                        "+INF",
+                        "-INF",
+                        "-4",
+                        "0.01",
+                        "-0.0",
+                    ],
+                    dtype=np.float32,
+                )
+                input_shape = (3, 5)
+
+            else:
+                np_fp32 = np.array(
+                    [
+                        "0.47892547",
+                        "0.48033667",
+                        "0.49968487",
+                        "0.81910545",
+                        "0.47031248",
+                        "0.816468",
+                        "0.21087195",
+                        "0.7229038",
+                        "NaN",
+                        "INF",
+                        "+INF",
+                        "-INF",
+                    ],
+                    dtype=np.float32,
+                ).reshape([3, 4])
+                input_shape = (3, 4)
+
+            if from_type in F8_TYPES:
+                np_from = onnx.numpy_helper.saturate_cast(np_fp32, from_np_dtype)
+                input = make_tensor(
+                    "input",
+                    from_dtype,
+                    input_shape,
+                    vals=np_from,
+                    raw=True,
+                )
+            elif from_type in FOUR_BIT_TYPES:
+                np_from = np_fp32.astype(from_np_dtype)
+                packed = onnx.numpy_helper._pack_4bitx2(np_from)
+                input = make_tensor(
+                    "input", from_dtype, input_shape, vals=packed.tobytes(), raw=True
+                )
+            else:
+                np_from = np_fp32.astype(from_np_dtype)
+                input = make_tensor(
+                    "input", from_dtype, input_shape, vals=np_from, raw=True
+                )
+
+            if to_type in F8_TYPES:
+                output = make_tensor(
+                    "output",
+                    to_dtype,
+                    input_shape,
+                    vals=onnx.numpy_helper.saturate_cast(np_from, to_np_dtype),
+                    raw=True,
+                )
+            elif to_type in FOUR_BIT_TYPES:
+                packed = onnx.numpy_helper._pack_4bitx2(np_from.astype(to_np_dtype))
+                output = make_tensor(
+                    "output", to_dtype, input_shape, vals=packed.tobytes(), raw=True
+                )
+            else:
+                output = make_tensor(
+                    "output",
+                    to_dtype,
+                    input_shape,
+                    vals=np_from.astype(to_np_dtype),
+                    raw=True,
+                )
+
+            like = make_tensor("like", to_dtype, (0,), vals=[])
+
+            node = onnx.helper.make_node(
+                "CastLike",
+                inputs=["input", "like"],
+                outputs=["output"],
+            )
+
+            expect(
+                node,
+                inputs=[input, like],
+                outputs=[output],
+                name="test_castlike_" + from_type + "_to_" + to_type,
+            )
+
+    @staticmethod
+    def export_saturate_false() -> None:
+        test_cases = itertools.product(
+            [
+                "FLOAT",
+                "FLOAT16",
+            ],
+            [
+                "FLOAT8E4M3FN",
+                "FLOAT8E4M3FNUZ",
+                "FLOAT8E5M2",
+                "FLOAT8E5M2FNUZ",
+            ],
+        )
+        input_shape = (3, 5)
+        for from_type, to_type in test_cases:
+            from_dtype = getattr(TensorProto, from_type)
+            to_dtype = getattr(TensorProto, to_type)
+            from_np_dtype = tensor_dtype_to_np_dtype(from_dtype)
+            to_np_dtype = tensor_dtype_to_np_dtype(to_dtype)
+            np_fp32 = np.array(
+                [
+                    "0.47892547",
+                    "0.48033667",
+                    "0.49968487",
+                    "0.81910545",
+                    "0.47031248",
+                    "0.7229038",
+                    "1000000",
+                    "1e-7",
+                    "NaN",
+                    "INF",
+                    "+INF",
+                    "-INF",
+                    "-0.0000001",
+                    "0.0000001",
+                    "-1000000",
+                ],
+                dtype=np.float32,
+            )
+
+            input = make_tensor(
+                "input",
+                from_dtype,
+                input_shape,
+                vals=np_fp32.astype(from_np_dtype),
+                raw=True,
+            )
+            output = make_tensor(
+                "output",
+                to_dtype,
+                input_shape,
+                vals=np_fp32.astype(from_np_dtype).astype(to_np_dtype),
+                raw=True,
+            )
+
+            like = make_tensor("like", to_dtype, (0,), vals=[])
+
+            node = onnx.helper.make_node(
+                "CastLike",
+                inputs=["input", "like"],
+                outputs=["output"],
+                saturate=0,
+            )
+
+            expect(
+                node,
+                inputs=[input, like],
+                outputs=[output],
+                name="test_castlike_no_saturate_" + from_type + "_to_" + to_type,
+            )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ceil.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ceil.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e4e754c1db4bb2c883858ff847564ccb729899a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/ceil.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Ceil(Base):
    """Test case generator for the ONNX Ceil operator."""

    @staticmethod
    def export() -> None:
        ceil_node = onnx.helper.make_node(
            "Ceil",
            inputs=["x"],
            outputs=["y"],
        )

        # Hand-picked example: ceil([-1.5, 1.2]) == [-1., 2.]
        example = np.array([-1.5, 1.2]).astype(np.float32)
        expect(
            ceil_node,
            inputs=[example],
            outputs=[np.ceil(example)],
            name="test_ceil_example",
        )

        # Random 3-D tensor for broader coverage.
        rand_input = np.random.randn(3, 4, 5).astype(np.float32)
        expect(
            ceil_node,
            inputs=[rand_input],
            outputs=[np.ceil(rand_input)],
            name="test_ceil",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/celu.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/celu.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d24b67f98030f40de3606cba24fa92641c65876
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/celu.py
@@ -0,0 +1,50 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Celu(Base):
    """Test case generator for the ONNX Celu operator."""

    @staticmethod
    def export() -> None:
        alpha = 2.0
        node = onnx.helper.make_node(
            "Celu",
            inputs=["X"],
            outputs=["Y"],
            alpha=alpha,
        )

        x = np.array(
            [
                [
                    [[0.8439683], [0.5665144], [0.05836735]],
                    [[0.02916367], [0.12964272], [0.5060197]],
                    [[0.79538304], [0.9411346], [0.9546573]],
                ],
                [
                    [[0.17730942], [0.46192095], [0.26480448]],
                    [[0.6746842], [0.01665257], [0.62473077]],
                    [[0.9240844], [0.9722341], [0.11965699]],
                ],
                [
                    [[0.41356155], [0.9129373], [0.59330076]],
                    [[0.81929934], [0.7862604], [0.11799799]],
                    [[0.69248444], [0.54119414], [0.07513223]],
                ],
            ],
            dtype=np.float32,
        )

        # Celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1))
        y = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x / alpha) - 1))

        expect(node, inputs=[x], outputs=[y], name="test_celu")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/center_crop_pad.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/center_crop_pad.py
new file mode 100644
index 0000000000000000000000000000000000000000..732192249492f331e94e3244ad2d1c8dc39a2d5e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/center_crop_pad.py
@@ -0,0 +1,130 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class CenterCropPad(Base):
    """Test case generator for the ONNX CenterCropPad operator."""

    @staticmethod
    def export_center_crop_pad_crop() -> None:
        """Pure crop: every target dimension is <= the input dimension."""
        node = onnx.helper.make_node(
            "CenterCropPad",
            inputs=["x", "shape"],
            outputs=["y"],
        )

        # First axis shrinks by an even amount, second by an odd amount.
        data = np.random.randn(20, 10, 3).astype(np.float32)
        target = np.array([10, 7, 3], dtype=np.int64)
        result = data[5:15, 1:8, :]

        expect(
            node, inputs=[data, target], outputs=[result], name="test_center_crop_pad_crop"
        )

    @staticmethod
    def export_center_crop_pad_pad() -> None:
        """Pure pad: every target dimension is >= the input dimension."""
        node = onnx.helper.make_node(
            "CenterCropPad",
            inputs=["x", "shape"],
            outputs=["y"],
        )

        # First axis grows by an even amount, second by an odd amount.
        data = np.random.randn(10, 7, 3).astype(np.float32)
        target = np.array([20, 10, 3], dtype=np.int64)
        result = np.zeros([20, 10, 3], dtype=np.float32)
        result[5:15, 1:8, :] = data

        expect(
            node, inputs=[data, target], outputs=[result], name="test_center_crop_pad_pad"
        )

    @staticmethod
    def export_center_crop_pad_crop_and_pad() -> None:
        """Mixed case: crop axis 0, pad axis 1, leave axis 2 unchanged."""
        node = onnx.helper.make_node(
            "CenterCropPad",
            inputs=["x", "shape"],
            outputs=["y"],
        )

        data = np.random.randn(20, 8, 3).astype(np.float32)
        target = np.array([10, 10, 3], dtype=np.int64)
        result = np.zeros([10, 10, 3], dtype=np.float32)
        result[:, 1:9, :] = data[5:15, :, :]

        expect(
            node,
            inputs=[data, target],
            outputs=[result],
            name="test_center_crop_pad_crop_and_pad",
        )

    @staticmethod
    def export_center_crop_pad_crop_axes_hwc() -> None:
        """HWC layout: `axes` restricts the op to the first two dimensions."""
        node = onnx.helper.make_node(
            "CenterCropPad",
            inputs=["x", "shape"],
            outputs=["y"],
            axes=[0, 1],
        )

        # Crop axis 0, pad axis 1; the channel axis is untouched.
        data = np.random.randn(20, 8, 3).astype(np.float32)
        target = np.array([10, 9], dtype=np.int64)
        result = np.zeros([10, 9, 3], dtype=np.float32)
        result[:, :8, :] = data[5:15, :, :]

        expect(
            node,
            inputs=[data, target],
            outputs=[result],
            name="test_center_crop_pad_crop_axes_hwc",
        )

    @staticmethod
    def export_center_crop_pad_crop_negative_axes_hwc() -> None:
        """Same as the HWC case, but addressing the axes with negative indices."""
        node = onnx.helper.make_node(
            "CenterCropPad",
            inputs=["x", "shape"],
            outputs=["y"],
            axes=[-3, -2],
        )

        # Crop axis 0, pad axis 1; the channel axis is untouched.
        data = np.random.randn(20, 8, 3).astype(np.float32)
        target = np.array([10, 9], dtype=np.int64)
        result = np.zeros([10, 9, 3], dtype=np.float32)
        result[:, :8, :] = data[5:15, :, :]

        expect(
            node,
            inputs=[data, target],
            outputs=[result],
            name="test_center_crop_pad_crop_negative_axes_hwc",
        )

    @staticmethod
    def export_center_crop_pad_crop_axes_chw() -> None:
        """CHW layout: `axes` selects the spatial dimensions 1 and 2."""
        node = onnx.helper.make_node(
            "CenterCropPad",
            inputs=["x", "shape"],
            outputs=["y"],
            axes=[1, 2],
        )

        # Crop axis 1, pad axis 2; the channel axis is untouched.
        data = np.random.randn(3, 20, 8).astype(np.float32)
        target = np.array([10, 9], dtype=np.int64)
        result = np.zeros([3, 10, 9], dtype=np.float32)
        result[:, :, :8] = data[:, 5:15, :]

        expect(
            node,
            inputs=[data, target],
            outputs=[result],
            name="test_center_crop_pad_crop_axes_chw",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/clip.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/clip.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c5ec6a59fda3e51e3e34a6f61aa3543a1ec00a9
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/clip.py
@@ -0,0 +1,144 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Clip(Base):
    """Test case generator for the ONNX Clip operator."""

    @staticmethod
    def export() -> None:
        node = onnx.helper.make_node(
            "Clip",
            inputs=["x", "min", "max"],
            outputs=["y"],
        )

        data = np.array([-2, 0, 2]).astype(np.float32)
        lo = np.float32(-1)
        hi = np.float32(1)
        # Expected output: [-1., 0., 1.]
        expect(
            node,
            inputs=[data, lo, hi],
            outputs=[np.clip(data, lo, hi)],
            name="test_clip_example",
        )

        data = np.random.randn(3, 4, 5).astype(np.float32)
        expect(
            node,
            inputs=[data, lo, hi],
            outputs=[np.clip(data, lo, hi)],
            name="test_clip",
        )
        node = onnx.helper.make_node(
            "Clip",
            inputs=["x", "min", "max"],
            outputs=["y"],
        )

        lo = np.float32(-5)
        hi = np.float32(5)

        # All values already inside [-5, 5]: the output equals the input.
        data = np.array([-1, 0, 1]).astype(np.float32)
        expected = np.array([-1, 0, 1]).astype(np.float32)
        expect(
            node, inputs=[data, lo, hi], outputs=[expected], name="test_clip_inbounds"
        )

        # Both ends exceed the bounds and get saturated.
        data = np.array([-6, 0, 6]).astype(np.float32)
        expected = np.array([-5, 0, 5]).astype(np.float32)
        expect(
            node, inputs=[data, lo, hi], outputs=[expected], name="test_clip_outbounds"
        )

        # Only the upper end exceeds the bounds.
        data = np.array([-1, 0, 6]).astype(np.float32)
        expected = np.array([-1, 0, 5]).astype(np.float32)
        expect(
            node,
            inputs=[data, lo, hi],
            outputs=[expected],
            name="test_clip_splitbounds",
        )

        # Degenerate bounds (min > max): everything collapses to max.
        data = np.array([-2, 0, 6]).astype(np.float32)
        expected = np.array([1, 1, 1]).astype(np.float32)
        lo = np.float32(2)
        hi = np.float32(1)
        expect(
            node,
            inputs=[data, lo, hi],
            outputs=[expected],
            name="test_clip_min_greater_than_max",
        )

    @staticmethod
    def export_clip_default() -> None:
        """Cover the optional min/max inputs being omitted."""
        node = onnx.helper.make_node(
            "Clip",
            inputs=["x", "min"],
            outputs=["y"],
        )
        lo = np.float32(0)
        data = np.random.randn(3, 4, 5).astype(np.float32)
        expect(
            node,
            inputs=[data, lo],
            outputs=[np.clip(data, lo, np.inf)],
            name="test_clip_default_min",
        )

        no_min = ""  # optional input, not supplied
        node = onnx.helper.make_node(
            "Clip",
            inputs=["x", no_min, "max"],
            outputs=["y"],
        )
        hi = np.float32(0)
        data = np.random.randn(3, 4, 5).astype(np.float32)
        expect(
            node,
            inputs=[data, hi],
            outputs=[np.clip(data, -np.inf, hi)],
            name="test_clip_default_max",
        )

        no_max = ""  # optional input, not supplied
        node = onnx.helper.make_node(
            "Clip",
            inputs=["x", no_min, no_max],
            outputs=["y"],
        )

        # With neither bound supplied the op is the identity.
        data = np.array([-1, 0, 1]).astype(np.float32)
        expected = np.array([-1, 0, 1]).astype(np.float32)
        expect(node, inputs=[data], outputs=[expected], name="test_clip_default_inbounds")

    @staticmethod
    def export_clip_default_int8() -> None:
        """Same default-bound cases, exercised with int8 tensors."""
        node = onnx.helper.make_node(
            "Clip",
            inputs=["x", "min"],
            outputs=["y"],
        )
        lo = np.int8(0)
        data = np.random.randn(3, 4, 5).astype(np.int8)
        expect(
            node,
            inputs=[data, lo],
            outputs=[np.clip(data, lo, np.iinfo(np.int8).max)],
            name="test_clip_default_int8_min",
        )

        no_min = ""  # optional input, not supplied
        node = onnx.helper.make_node(
            "Clip",
            inputs=["x", no_min, "max"],
            outputs=["y"],
        )
        hi = np.int8(0)
        data = np.random.randn(3, 4, 5).astype(np.int8)
        expect(
            node,
            inputs=[data, hi],
            outputs=[np.clip(data, np.iinfo(np.int8).min, hi)],
            name="test_clip_default_int8_max",
        )

        no_max = ""  # optional input, not supplied
        node = onnx.helper.make_node(
            "Clip",
            inputs=["x", no_min, no_max],
            outputs=["y"],
        )

        # With neither bound supplied the op is the identity.
        data = np.array([-1, 0, 1]).astype(np.int8)
        expected = np.array([-1, 0, 1]).astype(np.int8)
        expect(node, inputs=[data], outputs=[expected], name="test_clip_default_int8_inbounds")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/elu.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/elu.py
new file mode 100644
index 0000000000000000000000000000000000000000..d70d9a4be5bd05671844fa378c8f70684bb99f42
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/elu.py
@@ -0,0 +1,37 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Elu(Base):
    """Test case generator for the ONNX Elu operator."""

    @staticmethod
    def export() -> None:
        node = onnx.helper.make_node("Elu", inputs=["x"], outputs=["y"], alpha=2.0)

        # Elu(x) = x for x > 0, alpha * (exp(x) - 1) otherwise.
        # Expected output for [-1, 0, 1]: [-1.2642411, 0., 1.]
        data = np.array([-1, 0, 1]).astype(np.float32)
        result = np.clip(data, 0, np.inf) + (np.exp(np.clip(data, -np.inf, 0)) - 1) * 2.0
        expect(node, inputs=[data], outputs=[result], name="test_elu_example")

        data = np.random.randn(3, 4, 5).astype(np.float32)
        result = np.clip(data, 0, np.inf) + (np.exp(np.clip(data, -np.inf, 0)) - 1) * 2.0
        expect(node, inputs=[data], outputs=[result], name="test_elu")

    @staticmethod
    def export_elu_default() -> None:
        """Cover the default alpha (1.0) when the attribute is omitted."""
        default_alpha = 1.0
        node = onnx.helper.make_node(
            "Elu",
            inputs=["x"],
            outputs=["y"],
        )
        data = np.random.randn(3, 4, 5).astype(np.float32)
        result = (
            np.clip(data, 0, np.inf)
            + (np.exp(np.clip(data, -np.inf, 0)) - 1) * default_alpha
        )
        expect(node, inputs=[data], outputs=[result], name="test_elu_default")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/equal.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/equal.py
new file mode 100644
index 0000000000000000000000000000000000000000..af04a02320d3b3d0dc38c2f6fd68b369fb48239e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/equal.py
@@ -0,0 +1,92 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Equal(Base):
    """Test case generator for the ONNX Equal operator."""

    @staticmethod
    def export() -> None:
        node = onnx.helper.make_node(
            "Equal",
            inputs=["x", "y"],
            outputs=["z"],
        )

        # Signed integer dtypes: scaled gaussian samples cast down.
        # int32 keeps the bare "test_equal" name for backward compatibility.
        for dtype, suffix in ((np.int32, ""), (np.int8, "_int8"), (np.int16, "_int16")):
            a = (np.random.randn(3, 4, 5) * 10).astype(dtype)
            b = (np.random.randn(3, 4, 5) * 10).astype(dtype)
            expect(
                node,
                inputs=[a, b],
                outputs=[np.equal(a, b)],
                name="test_equal" + suffix,
            )

        # Unsigned integer dtypes: uniform integers in [0, 24).
        for dtype, suffix in (
            (np.uint8, "_uint8"),
            (np.uint16, "_uint16"),
            (np.uint32, "_uint32"),
            (np.uint64, "_uint64"),
        ):
            a = np.random.randint(24, size=(3, 4, 5), dtype=dtype)
            b = np.random.randint(24, size=(3, 4, 5), dtype=dtype)
            expect(
                node,
                inputs=[a, b],
                outputs=[np.equal(a, b)],
                name="test_equal" + suffix,
            )

    @staticmethod
    def export_equal_broadcast() -> None:
        """Second operand broadcasts along the trailing dimension."""
        node = onnx.helper.make_node(
            "Equal",
            inputs=["x", "y"],
            outputs=["z"],
        )

        a = (np.random.randn(3, 4, 5) * 10).astype(np.int32)
        b = (np.random.randn(5) * 10).astype(np.int32)
        expect(
            node, inputs=[a, b], outputs=[np.equal(a, b)], name="test_equal_bcast"
        )

    @staticmethod
    def export_equal_string() -> None:
        """Element-wise equality over string tensors."""
        node = onnx.helper.make_node(
            "Equal",
            inputs=["x", "y"],
            outputs=["z"],
        )
        a = np.array(["string1", "string2"], dtype=np.dtype(object))
        b = np.array(["string1", "string3"], dtype=np.dtype(object))
        expect(
            node, inputs=[a, b], outputs=[np.equal(a, b)], name="test_equal_string"
        )

    @staticmethod
    def export_equal_string_broadcast() -> None:
        """String equality with a broadcast scalar-like operand."""
        node = onnx.helper.make_node(
            "Equal",
            inputs=["x", "y"],
            outputs=["z"],
        )
        a = np.array(["string1", "string2"], dtype=np.dtype(object))
        b = np.array(["string1"], dtype=np.dtype(object))
        expect(
            node,
            inputs=[a, b],
            outputs=[np.equal(a, b)],
            name="test_equal_string_broadcast",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/erf.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/erf.py
new file mode 100644
index 0000000000000000000000000000000000000000..807e3779ae3b8b2c371a230fc5c8dc9e69c77206
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/erf.py
@@ -0,0 +1,26 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import math
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Erf(Base):
    """Test case generator for the ONNX Erf operator."""

    @staticmethod
    def export() -> None:
        node = onnx.helper.make_node(
            "Erf",
            inputs=["x"],
            outputs=["y"],
        )

        data = np.random.randn(1, 3, 32, 32).astype(np.float32)
        # math.erf is scalar-only, so lift it element-wise with np.vectorize.
        expected = np.vectorize(math.erf)(data).astype(np.float32)
        expect(node, inputs=[data], outputs=[expected], name="test_erf")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/exp.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/exp.py
new file mode 100644
index 0000000000000000000000000000000000000000..d172ff64ac96e9e1b8f1dff15a63691b89a5df94
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/exp.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Exp(Base):
    """Test case generator for the ONNX Exp operator."""

    @staticmethod
    def export() -> None:
        node = onnx.helper.make_node(
            "Exp",
            inputs=["x"],
            outputs=["y"],
        )

        # Hand-picked example: exp([-1, 0, 1]) == [0.36787945, 1., 2.71828175]
        example = np.array([-1, 0, 1]).astype(np.float32)
        expect(
            node, inputs=[example], outputs=[np.exp(example)], name="test_exp_example"
        )

        # Random 3-D tensor for broader coverage.
        rand_input = np.random.randn(3, 4, 5).astype(np.float32)
        expect(
            node, inputs=[rand_input], outputs=[np.exp(rand_input)], name="test_exp"
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/expand.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/expand.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8637c153146c58d7108910eca6adc6278738bf2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/expand.py
@@ -0,0 +1,66 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Expand(Base):
    """Test case generator for the ONNX Expand operator."""

    @staticmethod
    def export_dim_changed() -> None:
        """Expand a (3, 1) tensor to a rank-3 target shape via broadcasting."""
        node = onnx.helper.make_node(
            "Expand",
            inputs=["data", "new_shape"],
            outputs=["expanded"],
        )
        src_shape = [3, 1]
        # data == [[1.], [2.], [3.]]
        data = np.reshape(
            np.arange(1, np.prod(src_shape) + 1, dtype=np.float32), src_shape
        )
        target = [2, 1, 6]
        # Multiplying by ones of the target shape broadcasts each column out:
        # [[[1., 1., 1., 1., 1., 1.],
        #   [2., 2., 2., 2., 2., 2.],
        #   [3., 3., 3., 3., 3., 3.]]] repeated twice along the new axis.
        expanded = data * np.ones(target, dtype=np.float32)
        expect(
            node,
            inputs=[data, np.array(target, dtype=np.int64)],
            outputs=[expanded],
            name="test_expand_dim_changed",
        )

    @staticmethod
    def export_dim_unchanged() -> None:
        """Expand a (3, 1) tensor to (3, 4) without changing the rank."""
        node = onnx.helper.make_node(
            "Expand",
            inputs=["data", "new_shape"],
            outputs=["expanded"],
        )
        src_shape = [3, 1]
        target = [3, 4]
        # data == [[1.], [2.], [3.]]
        data = np.reshape(
            np.arange(1, np.prod(src_shape) + 1, dtype=np.float32), src_shape
        )
        # Tiling the single column four times matches the broadcast result:
        # [[1., 1., 1., 1.],
        #  [2., 2., 2., 2.],
        #  [3., 3., 3., 3.]]
        expanded = np.tile(data, 4)
        expect(
            node,
            inputs=[data, np.array(target, dtype=np.int64)],
            outputs=[expanded],
            name="test_expand_dim_unchanged",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/eyelike.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/eyelike.py
new file mode 100644
index 0000000000000000000000000000000000000000..df25e7eb59372b8815d6ce4df56b5e7c096a6e71
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/eyelike.py
@@ -0,0 +1,60 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class EyeLike(Base):
    """Test case generator for the ONNX EyeLike operator."""

    @staticmethod
    def export_without_dtype() -> None:
        """Output dtype defaults to the input dtype when `dtype` is omitted."""
        dims = (4, 4)
        node = onnx.helper.make_node(
            "EyeLike",
            inputs=["x"],
            outputs=["y"],
        )

        data = np.random.randint(0, 100, size=dims, dtype=np.int32)
        identity = np.eye(dims[0], dims[1], dtype=np.int32)
        expect(node, inputs=[data], outputs=[identity], name="test_eyelike_without_dtype")

    @staticmethod
    def export_with_dtype() -> None:
        """Explicit `dtype` attribute overrides the input element type."""
        dims = (3, 4)
        node = onnx.helper.make_node(
            "EyeLike",
            inputs=["x"],
            outputs=["y"],
            dtype=onnx.TensorProto.DOUBLE,
        )

        data = np.random.randint(0, 100, size=dims, dtype=np.int32)
        identity = np.eye(dims[0], dims[1], dtype=np.float64)
        expect(node, inputs=[data], outputs=[identity], name="test_eyelike_with_dtype")

    @staticmethod
    def export_populate_off_main_diagonal() -> None:
        """`k` shifts the populated diagonal off the main one."""
        dims = (4, 5)
        off_diagonal_offset = 1
        node = onnx.helper.make_node(
            "EyeLike",
            inputs=["x"],
            outputs=["y"],
            k=off_diagonal_offset,
            dtype=onnx.TensorProto.FLOAT,
        )

        data = np.random.randint(0, 100, size=dims, dtype=np.int32)
        identity = np.eye(dims[0], dims[1], k=off_diagonal_offset, dtype=np.float32)
        expect(
            node,
            inputs=[data],
            outputs=[identity],
            name="test_eyelike_populate_off_main_diagonal",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/flatten.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/flatten.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec57ca488e2aa1d048a4b4f8c3920e3a2e7ad537
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/flatten.py
@@ -0,0 +1,65 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Flatten(Base):
    """Test case generator for the ONNX Flatten operator."""

    @staticmethod
    def export() -> None:
        dims = (2, 3, 4, 5)
        tensor = np.random.random_sample(dims).astype(np.float32)

        # One test per possible non-negative axis.
        for axis in range(len(dims)):
            node = onnx.helper.make_node(
                "Flatten",
                inputs=["a"],
                outputs=["b"],
                axis=axis,
            )

            # Flatten collapses dims before `axis` into the first output dim
            # and the rest into the second.
            if axis == 0:
                flat_shape = (1, -1)
            else:
                flat_shape = (np.prod(dims[0:axis]).astype(int), -1)
            expect(
                node,
                inputs=[tensor],
                outputs=[np.reshape(tensor, flat_shape)],
                name="test_flatten_axis" + str(axis),
            )

    @staticmethod
    def export_flatten_with_default_axis() -> None:
        """Omitting the attribute uses the default axis=1."""
        node = onnx.helper.make_node(
            "Flatten",
            inputs=["a"],
            outputs=["b"],  # Default value for axis: axis=1
        )

        dims = (5, 4, 3, 2)
        tensor = np.random.random_sample(dims).astype(np.float32)
        expect(
            node,
            inputs=[tensor],
            outputs=[np.reshape(tensor, (5, 24))],
            name="test_flatten_default_axis",
        )

    @staticmethod
    def export_flatten_negative_axis() -> None:
        """One test per possible negative axis."""
        dims = (2, 3, 4, 5)
        tensor = np.random.random_sample(dims).astype(np.float32)

        for axis in range(-len(dims), 0):
            node = onnx.helper.make_node(
                "Flatten",
                inputs=["a"],
                outputs=["b"],
                axis=axis,
            )

            flat_shape = (np.prod(dims[0:axis]).astype(int), -1)
            expect(
                node,
                inputs=[tensor],
                outputs=[np.reshape(tensor, flat_shape)],
                name="test_flatten_negative_axis" + str(abs(axis)),
            )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/floor.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/floor.py
new file mode 100644
index 0000000000000000000000000000000000000000..c142d1c39a7920abf79aad7d35626301f4db82e5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/floor.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Floor(Base):
    """Test case generator for the ONNX Floor operator."""

    @staticmethod
    def export() -> None:
        floor_node = onnx.helper.make_node(
            "Floor",
            inputs=["x"],
            outputs=["y"],
        )

        # Hand-picked example: floor([-1.5, 1.2, 2]) == [-2., 1., 2.]
        example = np.array([-1.5, 1.2, 2]).astype(np.float32)
        expect(
            floor_node,
            inputs=[example],
            outputs=[np.floor(example)],
            name="test_floor_example",
        )

        # Random 3-D tensor for broader coverage.
        rand_input = np.random.randn(3, 4, 5).astype(np.float32)
        expect(
            floor_node,
            inputs=[rand_input],
            outputs=[np.floor(rand_input)],
            name="test_floor",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gather.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gather.py
new file mode 100644
index 0000000000000000000000000000000000000000..37aa464f92950d61ef9e3a5546d0d48865852a40
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gather.py
@@ -0,0 +1,91 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Gather(Base):
    """Test case generator for the ONNX Gather operator."""

    @staticmethod
    def export_gather_0() -> None:
        """Gather slices along axis 0."""
        node = onnx.helper.make_node(
            "Gather",
            inputs=["data", "indices"],
            outputs=["y"],
            axis=0,
        )
        values = np.random.randn(5, 4, 3, 2).astype(np.float32)
        picks = np.array([0, 1, 3])
        gathered = np.take(values, picks, axis=0)

        expect(
            node,
            inputs=[values, picks.astype(np.int64)],
            outputs=[gathered],
            name="test_gather_0",
        )

    @staticmethod
    def export_gather_1() -> None:
        """Gather slices along axis 1."""
        node = onnx.helper.make_node(
            "Gather",
            inputs=["data", "indices"],
            outputs=["y"],
            axis=1,
        )
        values = np.random.randn(5, 4, 3, 2).astype(np.float32)
        picks = np.array([0, 1, 3])
        gathered = np.take(values, picks, axis=1)

        expect(
            node,
            inputs=[values, picks.astype(np.int64)],
            outputs=[gathered],
            name="test_gather_1",
        )

    @staticmethod
    def export_gather_2d_indices() -> None:
        """Indices of rank 2 add a dimension to the output."""
        node = onnx.helper.make_node(
            "Gather",
            inputs=["data", "indices"],
            outputs=["y"],
            axis=1,
        )
        values = np.random.randn(3, 3).astype(np.float32)
        picks = np.array([[0, 2]])
        gathered = np.take(values, picks, axis=1)

        expect(
            node,
            inputs=[values, picks.astype(np.int64)],
            outputs=[gathered],
            name="test_gather_2d_indices",
        )

    @staticmethod
    def export_gather_negative_indices() -> None:
        """Negative indices count back from the end of the axis."""
        node = onnx.helper.make_node(
            "Gather",
            inputs=["data", "indices"],
            outputs=["y"],
            axis=0,
        )
        values = np.arange(10).astype(np.float32)
        picks = np.array([0, -9, -10])
        # gathered == [0. 1. 0.]
        gathered = np.take(values, picks, axis=0)

        expect(
            node,
            inputs=[values, picks.astype(np.int64)],
            outputs=[gathered],
            name="test_gather_negative_indices",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gatherelements.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gatherelements.py
new file mode 100644
index 0000000000000000000000000000000000000000..db4893e7278618b11aababb9467b55d802e3748f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gatherelements.py
@@ -0,0 +1,93 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+# The below GatherElements' numpy implementation is from https://stackoverflow.com/a/46204790/11767360
+def gather_elements(data, indices, axis=0):
+    data_swaped = np.swapaxes(data, 0, axis)
+    index_swaped = np.swapaxes(indices, 0, axis)
+    gathered = np.choose(index_swaped, data_swaped, mode="wrap")
+    y = np.swapaxes(gathered, 0, axis)
+    return y
+
+
+class GatherElements(Base):
+    @staticmethod
+    def export_gather_elements_0() -> None:
+        axis = 1
+        node = onnx.helper.make_node(
+            "GatherElements",
+            inputs=["data", "indices"],
+            outputs=["y"],
+            axis=axis,
+        )
+        data = np.array([[1, 2], [3, 4]], dtype=np.float32)
+        indices = np.array([[0, 0], [1, 0]], dtype=np.int32)
+
+        y = gather_elements(data, indices, axis)
+        # print(y) produces
+        # [[1, 1],
+        #  [4, 3]]
+
+        expect(
+            node,
+            inputs=[data, indices.astype(np.int64)],
+            outputs=[y],
+            name="test_gather_elements_0",
+        )
+
+    @staticmethod
+    def export_gather_elements_1() -> None:
+        axis = 0
+        node = onnx.helper.make_node(
+            "GatherElements",
+            inputs=["data", "indices"],
+            outputs=["y"],
+            axis=axis,
+        )
+        data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
+        indices = np.array([[1, 2, 0], [2, 0, 0]], dtype=np.int32)
+
+        y = gather_elements(data, indices, axis)
+        # print(y) produces
+        # [[4, 8, 3],
+        #  [7, 2, 3]]
+
+        expect(
+            node,
+            inputs=[data, indices.astype(np.int64)],
+            outputs=[y],
+            name="test_gather_elements_1",
+        )
+
+    @staticmethod
+    def export_gather_elements_negative_indices() -> None:
+        axis = 0
+        node = onnx.helper.make_node(
+            "GatherElements",
+            inputs=["data", "indices"],
+            outputs=["y"],
+            axis=axis,
+        )
+        data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
+        indices = np.array([[-1, -2, 0], [-2, 0, 0]], dtype=np.int32)
+
+        y = gather_elements(data, indices, axis)
+        # print(y) produces
+        # [[7, 5, 3],
+        #  [4, 2, 3]]
+
+        expect(
+            node,
+            inputs=[data, indices.astype(np.int64)],
+            outputs=[y],
+            name="test_gather_elements_negative_indices",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gathernd.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gathernd.py
new file mode 100644
index 0000000000000000000000000000000000000000..49abf129723d22414053d802a89950c927086a86
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gathernd.py
@@ -0,0 +1,121 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def gather_nd_impl(
+    data: np.ndarray, indices: np.ndarray, batch_dims: int
+) -> np.ndarray:
+    # Note the data rank - will be reused multiple times later
+    data_rank = len(data.shape)
+
+    # Check input tensors' shape/rank condition
+    assert indices.shape[-1] <= data_rank
+
+    # The list of data/indices shapes over the batch dims
+    batch_dims_shape = []
+
+    # The number of elements in the batch_dims for the data/indices arrays
+    batch_dims_size = 1
+
+    # Check that the shapes of indices and data are identical over the batch dims.
+    for i in range(batch_dims):
+        batch_dims_shape.append(indices.shape[i])
+        batch_dims_size *= indices.shape[i]
+
+    # Compute output of the op as below
+
+    # Compute shape of output array
+    output_shape = (
+        batch_dims_shape + list(indices.shape)[batch_dims:-1]
+        if (indices.shape[-1] == data_rank - batch_dims)
+        else batch_dims_shape
+        + list(indices.shape)[batch_dims:-1]
+        + list(data.shape)[batch_dims + indices.shape[-1] :]
+    )
+
+    # Placeholder for output data
+    output_data_buffer = []
+
+    # Flatten 'indices' to 2D array
+    reshaped_indices = indices.reshape(batch_dims_size, -1, indices.shape[-1])
+
+    # Flatten 'data' to array of shape (batch_dims_size, *data.shape[batch_dims:])
+    reshaped_data = data.reshape((batch_dims_size, *data.shape[batch_dims:]))
+
+    # gather each scalar value from 'data'
+    for batch_dim in range(reshaped_indices.shape[0]):
+        for outer_dim in range(reshaped_indices.shape[1]):
+            gather_index = tuple(reshaped_indices[batch_dim][outer_dim])
+            output_data_buffer.append(reshaped_data[(batch_dim, *gather_index)])
+    return np.asarray(output_data_buffer, dtype=data.dtype).reshape(output_shape)
+
+
+class GatherND(Base):
+    @staticmethod
+    def export_int32() -> None:
+        node = onnx.helper.make_node(
+            "GatherND",
+            inputs=["data", "indices"],
+            outputs=["output"],
+        )
+
+        data = np.array([[0, 1], [2, 3]], dtype=np.int32)
+        indices = np.array([[0, 0], [1, 1]], dtype=np.int64)
+        output = gather_nd_impl(data, indices, 0)
+        expected_output = np.array([0, 3], dtype=np.int32)
+        assert np.array_equal(output, expected_output)
+        expect(
+            node,
+            inputs=[data, indices],
+            outputs=[output],
+            name="test_gathernd_example_int32",
+        )
+
+    @staticmethod
+    def export_float32() -> None:
+        node = onnx.helper.make_node(
+            "GatherND",
+            inputs=["data", "indices"],
+            outputs=["output"],
+        )
+
+        data = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.float32)
+        indices = np.array([[[0, 1]], [[1, 0]]], dtype=np.int64)
+        output = gather_nd_impl(data, indices, 0)
+        expected_output = np.array([[[2, 3]], [[4, 5]]], dtype=np.float32)
+        assert np.array_equal(output, expected_output)
+        expect(
+            node,
+            inputs=[data, indices],
+            outputs=[output],
+            name="test_gathernd_example_float32",
+        )
+
+    @staticmethod
+    def export_int32_batchdim_1() -> None:
+        node = onnx.helper.make_node(
+            "GatherND",
+            inputs=["data", "indices"],
+            outputs=["output"],
+            batch_dims=1,
+        )
+
+        data = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.int32)
+        indices = np.array([[1], [0]], dtype=np.int64)
+        output = gather_nd_impl(data, indices, 1)
+        expected_output = np.array([[2, 3], [4, 5]], dtype=np.int32)
+        assert np.array_equal(output, expected_output)
+        expect(
+            node,
+            inputs=[data, indices],
+            outputs=[output],
+            name="test_gathernd_example_int32_batch_dim1",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gelu.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gelu.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3f8b90159400598c5ca3c7e34780a2e7364d8ce
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gelu.py
@@ -0,0 +1,52 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import math
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Gelu(Base):
+    @staticmethod
+    def export_gelu_tanh() -> None:
+        node = onnx.helper.make_node(
+            "Gelu", inputs=["x"], outputs=["y"], approximate="tanh"
+        )
+
+        x = np.array([-1, 0, 1]).astype(np.float32)
+        # expected output [-0.158808, 0., 0.841192]
+        y = (
+            0.5
+            * x
+            * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
+        ).astype(np.float32)
+        expect(node, inputs=[x], outputs=[y], name="test_gelu_tanh_1")
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        # output depends on the unseeded random input; no fixed expected values
+        y = (
+            0.5
+            * x
+            * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
+        ).astype(np.float32)
+        expect(node, inputs=[x], outputs=[y], name="test_gelu_tanh_2")
+
+    @staticmethod
+    def export_gelu_default() -> None:
+        node = onnx.helper.make_node("Gelu", inputs=["x"], outputs=["y"])
+
+        x = np.array([-1, 0, 1]).astype(np.float32)
+        # expected output [-0.15865526, 0., 0.84134474]
+        y = (0.5 * x * (1 + np.vectorize(math.erf)(x / np.sqrt(2)))).astype(np.float32)
+        expect(node, inputs=[x], outputs=[y], name="test_gelu_default_1")
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        # output depends on the unseeded random input; no fixed expected values
+        y = (0.5 * x * (1 + np.vectorize(math.erf)(x / np.sqrt(2)))).astype(np.float32)
+        expect(node, inputs=[x], outputs=[y], name="test_gelu_default_2")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gemm.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gemm.py
new file mode 100644
index 0000000000000000000000000000000000000000..287ea5570b9c6feee90dc0d4d88fe85da4ba972c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gemm.py
@@ -0,0 +1,157 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def gemm_reference_implementation(
+    A: np.ndarray,
+    B: np.ndarray,
+    C: np.ndarray | None = None,
+    alpha: float = 1.0,
+    beta: float = 1.0,
+    transA: int = 0,
+    transB: int = 0,
+) -> np.ndarray:
+    A = A if transA == 0 else A.T
+    B = B if transB == 0 else B.T
+    C = C if C is not None else np.array(0)
+
+    Y = alpha * np.dot(A, B) + beta * C
+
+    return Y.astype(A.dtype)
+
+
+class Gemm(Base):
+    @staticmethod
+    def export_default_zero_bias() -> None:
+        node = onnx.helper.make_node("Gemm", inputs=["a", "b", "c"], outputs=["y"])
+        a = np.random.ranf([3, 5]).astype(np.float32)
+        b = np.random.ranf([5, 4]).astype(np.float32)
+        c = np.zeros([1, 4]).astype(np.float32)
+        y = gemm_reference_implementation(a, b, c)
+        expect(node, inputs=[a, b, c], outputs=[y], name="test_gemm_default_zero_bias")
+
+    @staticmethod
+    def export_default_no_bias() -> None:
+        node = onnx.helper.make_node("Gemm", inputs=["a", "b"], outputs=["y"])
+        a = np.random.ranf([2, 10]).astype(np.float32)
+        b = np.random.ranf([10, 3]).astype(np.float32)
+        y = gemm_reference_implementation(a, b)
+        expect(node, inputs=[a, b], outputs=[y], name="test_gemm_default_no_bias")
+
+    @staticmethod
+    def export_default_scalar_bias() -> None:
+        node = onnx.helper.make_node("Gemm", inputs=["a", "b", "c"], outputs=["y"])
+        a = np.random.ranf([2, 3]).astype(np.float32)
+        b = np.random.ranf([3, 4]).astype(np.float32)
+        c = np.array(3.14).astype(np.float32)
+        y = gemm_reference_implementation(a, b, c)
+        expect(
+            node, inputs=[a, b, c], outputs=[y], name="test_gemm_default_scalar_bias"
+        )
+
+    @staticmethod
+    def export_default_single_elem_vector_bias() -> None:
+        node = onnx.helper.make_node("Gemm", inputs=["a", "b", "c"], outputs=["y"])
+        a = np.random.ranf([3, 7]).astype(np.float32)
+        b = np.random.ranf([7, 3]).astype(np.float32)
+        c = np.random.ranf([1]).astype(np.float32)
+        y = gemm_reference_implementation(a, b, c)
+        expect(
+            node,
+            inputs=[a, b, c],
+            outputs=[y],
+            name="test_gemm_default_single_elem_vector_bias",
+        )
+
+    @staticmethod
+    def export_default_vector_bias() -> None:
+        node = onnx.helper.make_node("Gemm", inputs=["a", "b", "c"], outputs=["y"])
+        a = np.random.ranf([2, 7]).astype(np.float32)
+        b = np.random.ranf([7, 4]).astype(np.float32)
+        c = np.random.ranf([1, 4]).astype(np.float32)
+        y = gemm_reference_implementation(a, b, c)
+        expect(
+            node, inputs=[a, b, c], outputs=[y], name="test_gemm_default_vector_bias"
+        )
+
+    @staticmethod
+    def export_default_matrix_bias() -> None:
+        node = onnx.helper.make_node("Gemm", inputs=["a", "b", "c"], outputs=["y"])
+        a = np.random.ranf([3, 6]).astype(np.float32)
+        b = np.random.ranf([6, 4]).astype(np.float32)
+        c = np.random.ranf([3, 4]).astype(np.float32)
+        y = gemm_reference_implementation(a, b, c)
+        expect(
+            node, inputs=[a, b, c], outputs=[y], name="test_gemm_default_matrix_bias"
+        )
+
+    @staticmethod
+    def export_transposeA() -> None:
+        node = onnx.helper.make_node(
+            "Gemm", inputs=["a", "b", "c"], outputs=["y"], transA=1
+        )
+        a = np.random.ranf([6, 3]).astype(np.float32)
+        b = np.random.ranf([6, 4]).astype(np.float32)
+        c = np.zeros([1, 4]).astype(np.float32)
+        y = gemm_reference_implementation(a, b, c, transA=1)
+        expect(node, inputs=[a, b, c], outputs=[y], name="test_gemm_transposeA")
+
+    @staticmethod
+    def export_transposeB() -> None:
+        node = onnx.helper.make_node(
+            "Gemm", inputs=["a", "b", "c"], outputs=["y"], transB=1
+        )
+        a = np.random.ranf([3, 6]).astype(np.float32)
+        b = np.random.ranf([4, 6]).astype(np.float32)
+        c = np.zeros([1, 4]).astype(np.float32)
+        y = gemm_reference_implementation(a, b, c, transB=1)
+        expect(node, inputs=[a, b, c], outputs=[y], name="test_gemm_transposeB")
+
+    @staticmethod
+    def export_alpha() -> None:
+        node = onnx.helper.make_node(
+            "Gemm", inputs=["a", "b", "c"], outputs=["y"], alpha=0.5
+        )
+        a = np.random.ranf([3, 5]).astype(np.float32)
+        b = np.random.ranf([5, 4]).astype(np.float32)
+        c = np.zeros([1, 4]).astype(np.float32)
+        y = gemm_reference_implementation(a, b, c, alpha=0.5)
+        expect(node, inputs=[a, b, c], outputs=[y], name="test_gemm_alpha")
+
+    @staticmethod
+    def export_beta() -> None:
+        node = onnx.helper.make_node(
+            "Gemm", inputs=["a", "b", "c"], outputs=["y"], beta=0.5
+        )
+        a = np.random.ranf([2, 7]).astype(np.float32)
+        b = np.random.ranf([7, 4]).astype(np.float32)
+        c = np.random.ranf([1, 4]).astype(np.float32)
+        y = gemm_reference_implementation(a, b, c, beta=0.5)
+        expect(node, inputs=[a, b, c], outputs=[y], name="test_gemm_beta")
+
+    @staticmethod
+    def export_all_attributes() -> None:
+        node = onnx.helper.make_node(
+            "Gemm",
+            inputs=["a", "b", "c"],
+            outputs=["y"],
+            alpha=0.25,
+            beta=0.35,
+            transA=1,
+            transB=1,
+        )
+        a = np.random.ranf([4, 3]).astype(np.float32)
+        b = np.random.ranf([5, 4]).astype(np.float32)
+        c = np.random.ranf([1, 5]).astype(np.float32)
+        y = gemm_reference_implementation(
+            a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35
+        )
+        expect(node, inputs=[a, b, c], outputs=[y], name="test_gemm_all_attributes")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/globalaveragepool.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/globalaveragepool.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fddab4b7d42f40be450c6c6f5d12145cec47fae
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/globalaveragepool.py
@@ -0,0 +1,44 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class GlobalAveragePool(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "GlobalAveragePool",
+            inputs=["x"],
+            outputs=["y"],
+        )
+        x = np.random.randn(1, 3, 5, 5).astype(np.float32)
+        y = np.mean(x, axis=tuple(range(2, np.ndim(x))), keepdims=True)
+        expect(node, inputs=[x], outputs=[y], name="test_globalaveragepool")
+
+    @staticmethod
+    def export_globalaveragepool_precomputed() -> None:
+        node = onnx.helper.make_node(
+            "GlobalAveragePool",
+            inputs=["x"],
+            outputs=["y"],
+        )
+        x = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3],
+                        [4, 5, 6],
+                        [7, 8, 9],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+        y = np.array([[[[5]]]]).astype(np.float32)
+        expect(node, inputs=[x], outputs=[y], name="test_globalaveragepool_precomputed")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/globalmaxpool.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/globalmaxpool.py
new file mode 100644
index 0000000000000000000000000000000000000000..8af125cb0cb559f7bacfd5d30f73c8b2e01d8a21
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/globalmaxpool.py
@@ -0,0 +1,44 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class GlobalMaxPool(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "GlobalMaxPool",
+            inputs=["x"],
+            outputs=["y"],
+        )
+        x = np.random.randn(1, 3, 5, 5).astype(np.float32)
+        y = np.max(x, axis=tuple(range(2, np.ndim(x))), keepdims=True)
+        expect(node, inputs=[x], outputs=[y], name="test_globalmaxpool")
+
+    @staticmethod
+    def export_globalmaxpool_precomputed() -> None:
+        node = onnx.helper.make_node(
+            "GlobalMaxPool",
+            inputs=["x"],
+            outputs=["y"],
+        )
+        x = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3],
+                        [4, 5, 6],
+                        [7, 8, 9],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+        y = np.array([[[[9]]]]).astype(np.float32)
+        expect(node, inputs=[x], outputs=[y], name="test_globalmaxpool_precomputed")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/greater.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/greater.py
new file mode 100644
index 0000000000000000000000000000000000000000..98f16fd51d7301c12806205cabcd8ee3837a7515
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/greater.py
@@ -0,0 +1,68 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Greater(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Greater",
+            inputs=["x", "y"],
+            outputs=["greater"],
+        )
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = np.random.randn(3, 4, 5).astype(np.float32)
+        z = np.greater(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_greater")
+
+        x = np.random.randn(3, 4, 5).astype(np.int8)
+        y = np.random.randn(3, 4, 5).astype(np.int8)
+        z = np.greater(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_greater_int8")
+
+        x = np.random.randn(3, 4, 5).astype(np.int16)
+        y = np.random.randn(3, 4, 5).astype(np.int16)
+        z = np.greater(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_greater_int16")
+
+        x = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)
+        z = np.greater(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_greater_uint8")
+
+        x = np.random.randint(24, size=(3, 4, 5), dtype=np.uint16)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint16)
+        z = np.greater(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_greater_uint16")
+
+        x = np.random.randint(24, size=(3, 4, 5), dtype=np.uint32)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint32)
+        z = np.greater(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_greater_uint32")
+
+        x = np.random.randint(24, size=(3, 4, 5), dtype=np.uint64)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint64)
+        z = np.greater(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_greater_uint64")
+
+    @staticmethod
+    def export_greater_broadcast() -> None:
+        node = onnx.helper.make_node(
+            "Greater",
+            inputs=["x", "y"],
+            outputs=["greater"],
+        )
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = np.random.randn(5).astype(np.float32)
+        z = np.greater(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_greater_bcast")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/greater_equal.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/greater_equal.py
new file mode 100644
index 0000000000000000000000000000000000000000..d76e85f01a71e8eb20bc5bf49add22c8ffc95663
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/greater_equal.py
@@ -0,0 +1,68 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Greater(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "GreaterOrEqual",
+            inputs=["x", "y"],
+            outputs=["greater_equal"],
+        )
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = np.random.randn(3, 4, 5).astype(np.float32)
+        z = np.greater_equal(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_greater_equal")
+
+        x = np.random.randn(3, 4, 5).astype(np.int8)
+        y = np.random.randn(3, 4, 5).astype(np.int8)
+        z = np.greater_equal(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_greater_equal_int8")
+
+        x = np.random.randn(3, 4, 5).astype(np.int16)
+        y = np.random.randn(3, 4, 5).astype(np.int16)
+        z = np.greater_equal(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_greater_equal_int16")
+
+        x = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)
+        z = np.greater_equal(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_greater_equal_uint8")
+
+        x = np.random.randint(24, size=(3, 4, 5), dtype=np.uint16)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint16)
+        z = np.greater_equal(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_greater_equal_uint16")
+
+        x = np.random.randint(24, size=(3, 4, 5), dtype=np.uint32)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint32)
+        z = np.greater_equal(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_greater_equal_uint32")
+
+        x = np.random.randint(24, size=(3, 4, 5), dtype=np.uint64)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint64)
+        z = np.greater_equal(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_greater_equal_uint64")
+
+    @staticmethod
+    def export_greater_broadcast() -> None:
+        node = onnx.helper.make_node(
+            "GreaterOrEqual",
+            inputs=["x", "y"],
+            outputs=["greater_equal"],
+        )
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = np.random.randn(5).astype(np.float32)
+        z = np.greater_equal(x, y)
+        expect(node, inputs=[x, y], outputs=[z], name="test_greater_equal_bcast")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gridsample.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gridsample.py
new file mode 100644
index 0000000000000000000000000000000000000000..522b45386215be94c565c95b0a2edcf5dbe1f5aa
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gridsample.py
@@ -0,0 +1,642 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class GridSample(Base):
+    @staticmethod
+    def export_gridsample() -> None:
+        node = onnx.helper.make_node(
+            "GridSample",
+            inputs=["X", "Grid"],
+            outputs=["Y"],
+            mode="linear",
+            padding_mode="zeros",
+            align_corners=0,
+        )
+        # X shape, [N, C, H, W] - [1, 1, 4, 4]
+        X = np.array(
+            [
+                [
+                    [
+                        [0.0, 1.0, 2.0, 3.0],
+                        [4.0, 5.0, 6.0, 7.0],
+                        [8.0, 9.0, 10.0, 11.0],
+                        [12.0, 13.0, 14.0, 15.0],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+        # Grid shape, [N, H_out, W_out, 2] - [1, 6, 6, 2]
+        Grid = np.array(
+            [
+                [
+                    [
+                        [-1.0000, -1.0000],
+                        [-0.6000, -1.0000],
+                        [-0.2000, -1.0000],
+                        [0.2000, -1.0000],
+                        [0.6000, -1.0000],
+                        [1.0000, -1.0000],
+                    ],
+                    [
+                        [-1.0000, -0.6000],
+                        [-0.6000, -0.6000],
+                        [-0.2000, -0.6000],
+                        [0.2000, -0.6000],
+                        [0.6000, -0.6000],
+                        [1.0000, -0.6000],
+                    ],
+                    [
+                        [-1.0000, -0.2000],
+                        [-0.6000, -0.2000],
+                        [-0.2000, -0.2000],
+                        [0.2000, -0.2000],
+                        [0.6000, -0.2000],
+                        [1.0000, -0.2000],
+                    ],
+                    [
+                        [-1.0000, 0.2000],
+                        [-0.6000, 0.2000],
+                        [-0.2000, 0.2000],
+                        [0.2000, 0.2000],
+                        [0.6000, 0.2000],
+                        [1.0000, 0.2000],
+                    ],
+                    [
+                        [-1.0000, 0.6000],
+                        [-0.6000, 0.6000],
+                        [-0.2000, 0.6000],
+                        [0.2000, 0.6000],
+                        [0.6000, 0.6000],
+                        [1.0000, 0.6000],
+                    ],
+                    [
+                        [-1.0000, 1.0000],
+                        [-0.6000, 1.0000],
+                        [-0.2000, 1.0000],
+                        [0.2000, 1.0000],
+                        [0.6000, 1.0000],
+                        [1.0000, 1.0000],
+                    ],
+                ]
+            ],
+            dtype=np.float32,
+        )
+        # Y shape, [N, C, H_out, W_out] - [1, 1, 6, 6]
+        Y = np.array(
+            [
+                [
+                    [
+                        [0.0000, 0.1500, 0.5500, 0.9500, 1.3500, 0.7500],
+                        [0.6000, 1.5000, 2.3000, 3.1000, 3.9000, 2.1000],
+                        [2.2000, 4.7000, 5.5000, 6.3000, 7.1000, 3.7000],
+                        [3.8000, 7.9000, 8.7000, 9.5000, 10.3000, 5.3000],
+                        [5.4000, 11.1000, 11.9000, 12.7000, 13.5000, 6.9000],
+                        [3.0000, 6.1500, 6.5500, 6.9500, 7.3500, 3.7500],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+        expect(node, inputs=[X, Grid], outputs=[Y], name="test_gridsample")
+
+    @staticmethod
+    def export_gridsample_paddingmode() -> None:
+        # X shape, [N, C, H, W] - [1, 1, 3, 2]
+        X = np.array(
+            [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
+            dtype=np.float32,
+        )
+        # Grid shape, [N, H_out, W_out, 2] - [1, 2, 4, 2]
+        Grid = np.array(
+            [
+                [
+                    [
+                        [-10.0000, -10.0000],
+                        [-5.0000, -5.0000],
+                        [-0.2000, -0.2000],
+                        [10.0000, 10.0000],
+                    ],
+                    [
+                        [10.0000, 10.0000],
+                        [-0.2000, -0.2000],
+                        [5.0000, 5.0000],
+                        [10.0000, 10.0000],
+                    ],
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        # setting padding_mode = 'zeros'
+        node = onnx.helper.make_node(
+            "GridSample",
+            inputs=["X", "Grid"],
+            outputs=["Y"],
+            padding_mode="zeros",
+        )
+        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
+        Y_zeros = np.array(
+            [[[[0.0000, 0.0000, 1.7000, 0.0000], [0.0000, 1.7000, 0.0000, 0.0000]]]],
+            dtype=np.float32,
+        )
+
+        expect(
+            node,
+            inputs=[X, Grid],
+            outputs=[Y_zeros],
+            name="test_gridsample_zeros_padding",
+        )
+
+        # setting padding_mode = 'border'
+        node = onnx.helper.make_node(
+            "GridSample",
+            inputs=["X", "Grid"],
+            outputs=["Y"],
+            padding_mode="border",
+        )
+        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
+        Y_border = np.array(
+            [[[[0.0000, 0.0000, 1.7000, 5.0000], [5.0000, 1.7000, 5.0000, 5.0000]]]],
+            dtype=np.float32,
+        )
+
+        expect(
+            node,
+            inputs=[X, Grid],
+            outputs=[Y_border],
+            name="test_gridsample_border_padding",
+        )
+
+        # setting padding_mode = 'reflection'
+        node = onnx.helper.make_node(
+            "GridSample",
+            inputs=["X", "Grid"],
+            outputs=["Y"],
+            padding_mode="reflection",
+        )
+        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
+        Y_reflection = np.array(
+            [[[[2.5000, 0.0000, 1.7000, 2.5000], [2.5000, 1.7000, 5.0000, 2.5000]]]],
+            dtype=np.float32,
+        )
+
+        expect(
+            node,
+            inputs=[X, Grid],
+            outputs=[Y_reflection],
+            name="test_gridsample_reflection_padding",
+        )
+
    @staticmethod
    def export_gridsample_mode_aligncorners() -> None:
        """Generate GridSample test cases for interpolation modes and align_corners.

        A fixed 1x1x3x2 input X is sampled through several grids with
        mode in {linear, nearest, cubic} and align_corners in {0, 1}.
        The reference outputs for the "additional" cases below were
        generated with PyTorch 2.0 (per the original comment).
        """
        # X shape, [N, C, H, W] - [1, 1, 3, 2]
        X = np.array(
            [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
            dtype=np.float32,
        )
        # Grid shape, [N, H_out, W_out, 2] - [1, 2, 4, 2]
        # Normalized (x, y) sample coordinates in [-1, 1].
        Grid = np.array(
            [
                [
                    [
                        [-1.0000, -1.0000],
                        [-0.5000, -0.5000],
                        [-0.2000, -0.2000],
                        [0.0000, 0.0000],
                    ],
                    [
                        [0.0000, 0.0000],
                        [-0.2000, -0.2000],
                        [0.5000, 0.5000],
                        [1.0000, 1.0000],
                    ],
                ]
            ],
            dtype=np.float32,
        )

        # setting mode = 'bilinear', default align_corners = 0
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="linear",
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_bilinear = np.array(
            [[[[0.0000, 0.5000, 1.7000, 2.5000], [2.5000, 1.7000, 4.5000, 1.2500]]]],
            dtype=np.float32,
        )

        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_bilinear],
            name="test_gridsample_bilinear",
        )

        # setting mode = 'bilinear', align_corners = 1
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="linear",
            align_corners=1,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_align_corners = np.array(
            [[[[0.0000, 1.2500, 2.0000, 2.5000], [2.5000, 2.0000, 3.7500, 5.0000]]]],
            dtype=np.float32,
        )

        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_align_corners],
            name="test_gridsample_aligncorners_true",
        )

        # setting mode = 'nearest'
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="nearest",
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_nearest = np.array(
            [[[[0.0, 0.0, 2.0, 2.0], [2.0, 2.0, 5.0, 0.0]]]],
            dtype=np.float32,
        )

        expect(
            node, inputs=[X, Grid], outputs=[Y_nearest], name="test_gridsample_nearest"
        )

        # setting mode = 'bicubic'
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="cubic",
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_bicubic = np.array(
            [[[[-0.1406, 0.3828, 1.7556, 2.9688], [2.9688, 1.7556, 5.1445, 1.3906]]]],
            dtype=np.float32,
        )

        expect(
            node, inputs=[X, Grid], outputs=[Y_bicubic], name="test_gridsample_bicubic"
        )

        # ============================================================================
        # Additional tests
        # The reference output tensors were generated using PyTorch 2.0.
        Grid = np.array(
            [
                [
                    [[-1.0, -0.8], [-0.6, -0.5], [-0.1, -0.2], [0.7, 0.0]],
                    [[0.0, 0.4], [0.2, -0.2], [-0.3, 0.5], [-1.0, 1.0]],
                ]
            ],
            dtype=np.float32,
        )

        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="nearest",
            align_corners=0,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_nearest = np.array(
            [[[[0.0, 0.0, 2.0, 3.0], [4.0, 3.0, 4.0, 4.0]]]],
            dtype=np.float32,
        )

        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_nearest],
            name="test_gridsample_nearest_align_corners_0_additional_1",
        )

        # setting mode = 'nearest'
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="nearest",
            align_corners=1,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_nearest = np.array(
            [[[[0.0, 0.0, 2.0, 3.0], [2.0, 3.0, 4.0, 4.0]]]],
            dtype=np.float32,
        )

        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_nearest],
            name="test_gridsample_nearest_align_corners_1_additional_1",
        )

        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="linear",
            align_corners=0,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_bilinear = np.array(
            [[[[0.0000, 0.4500, 1.8000, 2.4000], [3.7000, 2.1000, 3.7000, 1.0000]]]],
            dtype=np.float32,
        )

        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_bilinear],
            name="test_gridsample_bilinear_align_corners_0_additional_1",
        )

        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="linear",
            align_corners=1,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_bilinear = np.array(
            [[[[0.4000, 1.2000, 2.0500, 2.8500], [3.3000, 2.2000, 3.3500, 4.0000]]]],
            dtype=np.float32,
        )

        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_bilinear],
            name="test_gridsample_bilinear_align_corners_1_additional_1",
        )

        # These two new bicubic tests produce a slightly higher error (~5e-5).
        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="cubic",
            align_corners=0,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_bicubic = np.array(
            [
                [
                    [
                        [-0.173250, 0.284265, 1.923106, 2.568000],
                        [5.170375, 2.284414, 4.744844, 1.046875],
                    ]
                ]
            ],
            dtype=np.float32,
        )

        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_bicubic],
            name="test_gridsample_bicubic_align_corners_0_additional_1",
        )

        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="cubic",
            align_corners=1,
        )
        # Y shape, [N, C, H_out, W_out] - [1, 1, 2, 4]
        Y_bicubic = np.array(
            [
                [
                    [
                        [0.304001, 1.128750, 2.266270, 3.144844],
                        [4.531500, 2.455360, 4.599819, 4.000000],
                    ]
                ]
            ],
            dtype=np.float32,
        )

        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_bicubic],
            name="test_gridsample_bicubic_align_corners_1_additional_1",
        )
+
    @staticmethod
    def export_volumeetric_gridsample_mode_aligncorners() -> None:
        """Generate 5-D (volumetric) GridSample test cases.

        NOTE(review): the "volumeetric" typo in the method name is preserved
        because the test framework discovers export_* methods by name.
        """
        # X shape, [N, C, D, H, W] - [1, 1, 3, 2, 2]
        X = np.array(
            [
                [
                    [
                        [[1.0, 2.0], [3.0, 4.0]],
                        [[5.0, 6.0], [7.0, 8.0]],
                        [[9.0, 10.0], [11.0, 12.0]],
                    ]
                ]
            ],
            dtype=np.float32,
        )

        # Grid shape, [N, D_out, H_out, W_out, 3] - [1, 2, 4, 2, 3]
        # Normalized (x, y, z) sample coordinates in [-1, 1].
        Grid = np.array(
            [
                [
                    [
                        [[-1.0, -1.0, -1.0], [-1.0, -0.5, 0.3]],
                        [[-0.5, -0.5, -0.5], [1.0, -0.6, -1.0]],
                        [[-0.2, -0.2, -0.2], [0.4, 0.2, 0.6]],
                        [[0.0, 0.0, 0.0], [-1.0, 0.0, 0.0]],
                    ],
                    [
                        [[0.0, 0.0, 0.0], [-1.0, 1.0, 0.0]],
                        [[-0.2, -0.2, -0.2], [1.0, 0.4, -0.2]],
                        [[0.5, 0.5, 0.5], [-1.0, -0.8, 0.8]],
                        [[1.0, 1.0, 1.0], [0.4, 0.6, -0.3]],
                    ],
                ]
            ],
            dtype=np.float32,
        )

        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="nearest",
            align_corners=0,
        )
        # Y shape, [N, C, D_out, H_out, W_out] - [1, 1, 2, 4, 2]
        Y_nearest = np.array(
            [
                [
                    [
                        [[1.0, 5.0], [1.0, 0.0], [5.0, 12.0], [5.0, 5.0]],
                        [[5.0, 0.0], [5.0, 0.0], [12.0, 9.0], [0.0, 8.0]],
                    ]
                ]
            ],
            dtype=np.float32,
        )

        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_nearest],
            name="test_gridsample_volumetric_nearest_align_corners_0",
        )

        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="nearest",
            align_corners=1,
        )
        # Y shape, [N, C, D_out, H_out, W_out] - [1, 1, 2, 4, 2]
        Y_nearest = np.array(
            [
                [
                    [
                        [[1.0, 5.0], [1.0, 2.0], [5.0, 12.0], [5.0, 5.0]],
                        [[5.0, 7.0], [5.0, 8.0], [12.0, 9.0], [12.0, 8.0]],
                    ]
                ]
            ],
            dtype=np.float32,
        )

        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_nearest],
            name="test_gridsample_volumetric_nearest_align_corners_1",
        )

        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="linear",
            align_corners=0,
        )
        # Y shape, [N, C, D_out, H_out, W_out] - [1, 1, 2, 4, 2]
        Y_bilinear = np.array(
            [
                [
                    [
                        [
                            [0.1250, 3.4000],
                            [2.0000, 0.4500],
                            [4.7000, 10.9000],
                            [6.5000, 3.0000],
                        ],
                        [
                            [6.5000, 1.7500],
                            [4.7000, 3.3000],
                            [11.0000, 2.5200],
                            [1.5000, 5.4900],
                        ],
                    ]
                ]
            ],
            dtype=np.float32,
        )

        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_bilinear],
            name="test_gridsample_volumetric_bilinear_align_corners_0",
        )

        node = onnx.helper.make_node(
            "GridSample",
            inputs=["X", "Grid"],
            outputs=["Y"],
            mode="linear",
            align_corners=1,
        )
        # Y shape, [N, C, D_out, H_out, W_out] - [1, 1, 2, 4, 2]
        Y_bilinear = np.array(
            [
                [
                    [
                        [
                            [1.0000, 6.7000],
                            [3.7500, 2.4000],
                            [5.4000, 9.3000],
                            [6.5000, 6.0000],
                        ],
                        [
                            [6.5000, 7.0000],
                            [5.4000, 6.6000],
                            [9.2500, 8.4000],
                            [12.0000, 6.1000],
                        ],
                    ]
                ]
            ],
            dtype=np.float32,
        )

        expect(
            node,
            inputs=[X, Grid],
            outputs=[Y_bilinear],
            name="test_gridsample_volumetric_bilinear_align_corners_1",
        )
+
+    """
+    For anyone who wants to test via a script. Commented out because the
+    GitHub ONNX CI does not have the torch Python package.
+    @staticmethod
+    def export_gridsample_torch():  # type: () -> None
+        node = onnx.helper.make_node(
+            'GridSample',
+            inputs=['X', 'Grid'],
+            outputs=['Y'],
+            mode='bilinear',
+            padding_mode='zeros',
+            align_corners=0,
+        )
+
+        # X shape, [N, C, H, W] - [1, 1, 4, 4]
+        # Grid shape, [N, H_out, W_out, 2] - [1, 6, 6, 2]
+        # Y shape, [N, C, H_out, W_out] - [1, 1, 6, 6]
+        import torch
+        X = torch.arange(3 * 3).view(1, 1, 3, 3).float()
+        d = torch.linspace(-1, 1, 6)
+        meshx, meshy = torch.meshgrid((d, d))
+        grid = torch.stack((meshy, meshx), 2)
+        Grid = grid.unsqueeze(0)
+        Y = torch.nn.functional.grid_sample(X, Grid, mode='bilinear',
+                                            padding_mode='zeros', align_corners=False)
+        expect(node, inputs=[X.numpy(), Grid.numpy()], outputs=[Y.numpy()],
+               name='test_gridsample_torch')
+    """
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/groupnormalization.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/groupnormalization.py
new file mode 100644
index 0000000000000000000000000000000000000000..fce3f198217e06778404badc1d9f6235a12777f5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/groupnormalization.py
@@ -0,0 +1,78 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+# Group normalization's reference implementation
+def _group_normalization(x, num_groups, scale, bias, epsilon=1e-5):
+    # Assume channel is first dim
+    assert x.shape[1] % num_groups == 0
+    group_size = x.shape[1] // num_groups
+    # Reshape to [N, group_size, C/group_size, H, W, ...]
+    new_shape = [x.shape[0], num_groups, group_size, *list(x.shape[2:])]
+    x_reshaped = x.reshape(new_shape)
+    axes = tuple(range(2, len(new_shape)))
+    mean = np.mean(x_reshaped, axis=axes, keepdims=True)
+    var = np.var(x_reshaped, axis=axes, keepdims=True)
+    x_normalized = ((x_reshaped - mean) / np.sqrt(var + epsilon)).reshape(x.shape)
+    dim_ones = (1,) * (len(x.shape) - 2)
+    scale = scale.reshape(-1, *dim_ones)
+    bias = bias.reshape(-1, *dim_ones)
+    return scale * x_normalized + bias
+
+
class GroupNormalization(Base):
    """Test cases for the GroupNormalization operator."""

    @staticmethod
    def export() -> None:
        """GroupNormalization with the default epsilon."""
        channels, num_groups = 4, 2
        x = np.random.randn(3, channels, 2, 2).astype(np.float32)
        scale = np.random.randn(channels).astype(np.float32)
        bias = np.random.randn(channels).astype(np.float32)

        node = onnx.helper.make_node(
            "GroupNormalization",
            inputs=["x", "scale", "bias"],
            outputs=["y"],
            num_groups=num_groups,
        )

        # Expected output comes from the numpy reference implementation.
        y = _group_normalization(x, num_groups, scale, bias).astype(np.float32)

        expect(
            node,
            inputs=[x, scale, bias],
            outputs=[y],
            name="test_group_normalization_example",
        )

    @staticmethod
    def export_epsilon() -> None:
        """GroupNormalization with an explicit (non-default) epsilon attribute."""
        channels, num_groups = 4, 2
        x = np.random.randn(3, channels, 2, 2).astype(np.float32)
        scale = np.random.randn(channels).astype(np.float32)
        bias = np.random.randn(channels).astype(np.float32)
        epsilon = 1e-2

        node = onnx.helper.make_node(
            "GroupNormalization",
            inputs=["x", "scale", "bias"],
            outputs=["y"],
            epsilon=epsilon,
            num_groups=num_groups,
        )

        y = _group_normalization(x, num_groups, scale, bias, epsilon).astype(np.float32)

        expect(
            node,
            inputs=[x, scale, bias],
            outputs=[y],
            name="test_group_normalization_epsilon",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gru.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gru.py
new file mode 100644
index 0000000000000000000000000000000000000000..f205fa27e962479ce48e475d03c625b24deafcb4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/gru.py
@@ -0,0 +1,264 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from typing import Any
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class GRUHelper:
    """Single-direction numpy reference implementation of the ONNX GRU cell."""

    def __init__(self, **params: Any) -> None:
        """Validate required inputs and normalize optional ones to defaults.

        Required keyword params: ``X``, ``W``, ``R``.  Optional: ``B``,
        ``initial_h``, ``linear_before_reset``, ``layout``.
        Only num_directions == 1 is supported.
        """
        num_gates = 3  # update (z), reset (r), hidden (h)

        for name in ("X", "W", "R"):
            assert name in params, f"Missing Required Input: {name}"

        self.num_directions = params["W"].shape[0]
        if self.num_directions != 1:
            raise NotImplementedError()

        # Drop the leading num_directions axis from every non-X input.
        for key in list(params):
            if key != "X":
                params[key] = np.squeeze(params[key], axis=0)

        hidden_size = params["R"].shape[-1]
        # NOTE(review): batch_size is read from X.shape[1] *before* any layout
        # swap; with layout=1 this is actually the sequence length.  Preserved
        # from the original — the default initial_h still works via numpy
        # broadcasting in step().
        batch_size = params["X"].shape[1]

        layout = params.get("layout", 0)
        x = params["X"]
        if layout != 0:
            # layout=1 stores X as [batch, seq, input]; convert to seq-major.
            x = np.swapaxes(x, 0, 1)

        self.X = x
        self.W = params["W"]
        self.R = params["R"]
        self.B = params.get("B", np.zeros(2 * num_gates * hidden_size))
        self.H_0 = params.get("initial_h", np.zeros((batch_size, hidden_size)))
        self.LBR = params.get("linear_before_reset", 0)
        self.LAYOUT = layout

    def f(self, x: np.ndarray) -> np.ndarray:
        """Gate activation: logistic sigmoid."""
        return 1 / (1 + np.exp(-x))

    def g(self, x: np.ndarray) -> np.ndarray:
        """Candidate activation: tanh."""
        return np.tanh(x)

    def step(self) -> tuple[np.ndarray, np.ndarray]:
        """Run the GRU recurrence over the whole sequence.

        Returns (Y, Y_h): all hidden states and the final hidden state,
        shaped according to self.LAYOUT.
        """
        seq_length, batch_size = self.X.shape[0], self.X.shape[1]
        hidden_size = self.H_0.shape[-1]

        w_z, w_r, w_h = np.split(self.W, 3)
        r_z, r_r, r_h = np.split(self.R, 3)
        w_bz, w_br, w_bh, r_bz, r_br, r_bh = np.split(self.B, 6)

        # Fuse the z and r gates so each timestep needs one matmul per operand.
        zr_w = np.concatenate((w_z, w_r)).T
        zr_r = np.concatenate((r_z, r_r)).T
        zr_b = np.concatenate((w_bz, w_br)) + np.concatenate((r_bz, r_br))

        state = self.H_0
        states = []
        for frame in np.split(self.X, seq_length, axis=0):
            fused = np.dot(frame, zr_w) + np.dot(state, zr_r) + zr_b
            z, r = np.split(fused, 2, -1)
            z = self.f(z)
            r = self.f(r)
            # Two candidate formulations, selected by linear_before_reset.
            h_default = self.g(
                np.dot(frame, w_h.T) + np.dot(r * state, r_h.T) + w_bh + r_bh
            )
            h_linear = self.g(
                np.dot(frame, w_h.T) + r * (np.dot(state, r_h.T) + r_bh) + w_bh
            )
            candidate = h_linear if self.LBR else h_default
            state = (1 - z) * candidate + z * state
            states.append(state)

        Y = np.empty([seq_length, self.num_directions, batch_size, hidden_size])
        if self.num_directions == 1:
            Y[:, 0, :, :] = np.concatenate(states)

        if self.LAYOUT == 0:
            Y_h = Y[-1]
        else:
            # layout=1: reorder to [batch, seq, num_directions, hidden].
            Y = np.transpose(Y, [2, 0, 1, 3])
            Y_h = Y[:, :, -1, :]

        return Y, Y_h
+
+
class GRU(Base):
    """Test cases for the GRU operator; expected outputs come from GRUHelper."""

    @staticmethod
    def export_defaults() -> None:
        """GRU with only the required inputs (X, W, R)."""
        # Renamed local from `input` to avoid shadowing the builtin.
        x = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]).astype(np.float32)

        input_size = 2
        hidden_size = 5
        weight_scale = 0.1
        number_of_gates = 3

        node = onnx.helper.make_node(
            "GRU", inputs=["X", "W", "R"], outputs=["", "Y_h"], hidden_size=hidden_size
        )

        W = weight_scale * np.ones(
            (1, number_of_gates * hidden_size, input_size)
        ).astype(np.float32)
        R = weight_scale * np.ones(
            (1, number_of_gates * hidden_size, hidden_size)
        ).astype(np.float32)

        gru = GRUHelper(X=x, W=W, R=R)
        _, Y_h = gru.step()
        expect(
            node,
            inputs=[x, W, R],
            outputs=[Y_h.astype(np.float32)],
            name="test_gru_defaults",
        )

    @staticmethod
    def export_initial_bias() -> None:
        """GRU with a custom bias tensor B (W bias set, R bias zero)."""
        x = np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]).astype(
            np.float32
        )

        input_size = 3
        hidden_size = 3
        weight_scale = 0.1
        custom_bias = 0.1
        number_of_gates = 3

        node = onnx.helper.make_node(
            "GRU",
            inputs=["X", "W", "R", "B"],
            outputs=["", "Y_h"],
            hidden_size=hidden_size,
        )

        W = weight_scale * np.ones(
            (1, number_of_gates * hidden_size, input_size)
        ).astype(np.float32)
        R = weight_scale * np.ones(
            (1, number_of_gates * hidden_size, hidden_size)
        ).astype(np.float32)

        # Adding custom bias
        W_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(
            np.float32
        )
        R_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)
        B = np.concatenate((W_B, R_B), axis=1)

        gru = GRUHelper(X=x, W=W, R=R, B=B)
        _, Y_h = gru.step()
        expect(
            node,
            inputs=[x, W, R, B],
            outputs=[Y_h.astype(np.float32)],
            name="test_gru_with_initial_bias",
        )

    @staticmethod
    def export_seq_length() -> None:
        """GRU over a 2-step sequence with random weights and biases."""
        x = np.array(
            [
                [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
                [[10.0, 11.0, 12.0], [13.0, 14.0, 15.0], [16.0, 17.0, 18.0]],
            ]
        ).astype(np.float32)

        input_size = 3
        hidden_size = 5
        number_of_gates = 3

        node = onnx.helper.make_node(
            "GRU",
            inputs=["X", "W", "R", "B"],
            outputs=["", "Y_h"],
            hidden_size=hidden_size,
        )

        W = np.random.randn(1, number_of_gates * hidden_size, input_size).astype(
            np.float32
        )
        R = np.random.randn(1, number_of_gates * hidden_size, hidden_size).astype(
            np.float32
        )

        # Adding custom bias
        W_B = np.random.randn(1, number_of_gates * hidden_size).astype(np.float32)
        R_B = np.random.randn(1, number_of_gates * hidden_size).astype(np.float32)
        B = np.concatenate((W_B, R_B), axis=1)

        gru = GRUHelper(X=x, W=W, R=R, B=B)
        _, Y_h = gru.step()
        expect(
            node,
            inputs=[x, W, R, B],
            outputs=[Y_h.astype(np.float32)],
            name="test_gru_seq_length",
        )

    @staticmethod
    def export_batchwise() -> None:
        """GRU with layout=1 (batch-major X) and both Y and Y_h outputs."""
        x = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]]).astype(np.float32)

        input_size = 2
        hidden_size = 6
        number_of_gates = 3
        weight_scale = 0.2
        layout = 1

        node = onnx.helper.make_node(
            "GRU",
            inputs=["X", "W", "R"],
            outputs=["Y", "Y_h"],
            hidden_size=hidden_size,
            layout=layout,
        )

        W = weight_scale * np.ones(
            (1, number_of_gates * hidden_size, input_size)
        ).astype(np.float32)
        R = weight_scale * np.ones(
            (1, number_of_gates * hidden_size, hidden_size)
        ).astype(np.float32)

        gru = GRUHelper(X=x, W=W, R=R, layout=layout)
        Y, Y_h = gru.step()
        expect(
            node,
            inputs=[x, W, R],
            outputs=[Y.astype(np.float32), Y_h.astype(np.float32)],
            name="test_gru_batchwise",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/hammingwindow.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/hammingwindow.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa2c1c6c1d641df4d617c246544bba2524f6ae9d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/hammingwindow.py
@@ -0,0 +1,48 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class HammingWindow(Base):
    """Test cases for the HammingWindow operator."""

    @staticmethod
    def export() -> None:
        # Hamming coefficients: a0 = 25/46, a1 = 1 - a0.
        a0 = 25 / 46
        a1 = 1 - a0
        size = np.int32(10)

        # Periodic window (the default): cosine period spans `size` samples.
        node = onnx.helper.make_node(
            "HammingWindow",
            inputs=["x"],
            outputs=["y"],
        )
        window = a0 - a1 * np.cos(
            2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / size
        )
        expect(
            node,
            inputs=[size],
            outputs=[window.astype(np.float32)],
            name="test_hammingwindow",
        )

        # Symmetric window: cosine period spans `size - 1` samples.
        node = onnx.helper.make_node(
            "HammingWindow", inputs=["x"], outputs=["y"], periodic=0
        )
        window = a0 - a1 * np.cos(
            2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / (size - 1)
        )
        expect(
            node,
            inputs=[size],
            outputs=[window.astype(np.float32)],
            name="test_hammingwindow_symmetric",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/hannwindow.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/hannwindow.py
new file mode 100644
index 0000000000000000000000000000000000000000..88ff403c77687b04bf77aacf9382f26f39337bb6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/hannwindow.py
@@ -0,0 +1,45 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class HannWindow(Base):
    """Test cases for the HannWindow operator."""

    @staticmethod
    def export() -> None:
        # Hann coefficients: a0 = a1 = 0.5.
        a0 = 0.5
        a1 = 0.5
        size = np.int32(10)

        # Periodic window (the default): cosine period spans `size` samples.
        node = onnx.helper.make_node(
            "HannWindow",
            inputs=["x"],
            outputs=["y"],
        )
        window = a0 - a1 * np.cos(
            2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / size
        )
        expect(
            node,
            inputs=[size],
            outputs=[window.astype(np.float32)],
            name="test_hannwindow",
        )

        # Symmetric window: cosine period spans `size - 1` samples.
        node = onnx.helper.make_node(
            "HannWindow", inputs=["x"], outputs=["y"], periodic=0
        )
        window = a0 - a1 * np.cos(
            2 * np.pi * np.arange(0, size, 1, dtype=np.float32) / (size - 1)
        )
        expect(
            node,
            inputs=[size],
            outputs=[window.astype(np.float32)],
            name="test_hannwindow_symmetric",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/hardmax.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/hardmax.py
new file mode 100644
index 0000000000000000000000000000000000000000..76a31b0ef36343733c1e6815c6f06b0ebb010bec
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/hardmax.py
@@ -0,0 +1,92 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
def hardmax(x: np.ndarray, axis: int = -1) -> np.ndarray:
    """One-hot encode the position of the maximum along *axis*.

    Ties resolve to the first (lowest-index) maximum, matching np.argmax.
    """
    winners = np.expand_dims(np.argmax(x, axis=axis), axis=axis)
    result = np.zeros_like(x)
    np.put_along_axis(result, winners, 1, axis=axis)
    return result
+
+
class Hardmax(Base):
    """Test cases for the Hardmax operator."""

    @staticmethod
    def export() -> None:
        node = onnx.helper.make_node(
            "Hardmax",
            inputs=["x"],
            outputs=["y"],
        )

        x = np.array([[3, 0, 1, 2], [2, 5, 1, 0], [0, 1, 3, 2], [0, 1, 2, 3]]).astype(
            np.float32
        )
        # Row-wise argmax one-hot; for this input the result is the identity:
        # [[1. 0. 0. 0.]
        #  [0. 1. 0. 0.]
        #  [0. 0. 1. 0.]
        #  [0. 0. 0. 1.]]
        expect(node, inputs=[x], outputs=[hardmax(x)], name="test_hardmax_example")

        # For multiple occurrences of the maximal value, the first occurrence
        # is selected for the one-hot output: [[1, 0, 0, 0]].
        x = np.array([[3, 3, 3, 1]]).astype(np.float32)
        expect(node, inputs=[x], outputs=[hardmax(x)], name="test_hardmax_one_hot")

    @staticmethod
    def export_hardmax_axis() -> None:
        x = np.random.randn(3, 4, 5).astype(np.float32)

        # Exercise every explicit axis value, including a negative one.
        for axis, case_name in (
            (0, "test_hardmax_axis_0"),
            (1, "test_hardmax_axis_1"),
            (2, "test_hardmax_axis_2"),
            (-1, "test_hardmax_negative_axis"),
        ):
            node = onnx.helper.make_node(
                "Hardmax",
                inputs=["x"],
                outputs=["y"],
                axis=axis,
            )
            expect(
                node, inputs=[x], outputs=[hardmax(x, axis=axis)], name=case_name
            )

        # Omitting the attribute defaults to axis = -1.
        node = onnx.helper.make_node(
            "Hardmax",
            inputs=["x"],
            outputs=["y"],
        )
        expect(
            node,
            inputs=[x],
            outputs=[hardmax(x, axis=-1)],
            name="test_hardmax_default_axis",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/hardsigmoid.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/hardsigmoid.py
new file mode 100644
index 0000000000000000000000000000000000000000..8be888b81bd609f461d64e95eb9ebb328360d0d0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/hardsigmoid.py
@@ -0,0 +1,39 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class HardSigmoid(Base):
    """Test cases for HardSigmoid: y = clip(alpha * x + beta, 0, 1)."""

    @staticmethod
    def export() -> None:
        """HardSigmoid with explicit alpha=0.5 and beta=0.6."""
        node = onnx.helper.make_node(
            "HardSigmoid", inputs=["x"], outputs=["y"], alpha=0.5, beta=0.6
        )

        small = np.array([-1, 0, 1]).astype(np.float32)
        # expected output [0.1, 0.6, 1.]
        expect(
            node,
            inputs=[small],
            outputs=[np.clip(small * 0.5 + 0.6, 0, 1)],
            name="test_hardsigmoid_example",
        )

        big = np.random.randn(3, 4, 5).astype(np.float32)
        expect(
            node,
            inputs=[big],
            outputs=[np.clip(big * 0.5 + 0.6, 0, 1)],
            name="test_hardsigmoid",
        )

    @staticmethod
    def export_hardsigmoid_default() -> None:
        """HardSigmoid with no attributes: defaults are alpha=0.2, beta=0.5."""
        default_alpha = 0.2
        default_beta = 0.5
        node = onnx.helper.make_node(
            "HardSigmoid",
            inputs=["x"],
            outputs=["y"],
        )
        data = np.random.randn(3, 4, 5).astype(np.float32)
        expected = np.clip(data * default_alpha + default_beta, 0, 1)
        expect(node, inputs=[data], outputs=[expected], name="test_hardsigmoid_default")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/hardswish.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/hardswish.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c76d40a79cbc45970cb1169705d705fda878203
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/hardswish.py
@@ -0,0 +1,30 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
def hardswish(x: np.ndarray) -> np.ndarray:
    """Reference HardSwish: x * clip(x * 1/6 + 1/2, 0, 1).

    Preserves the input dtype (float32 in, float32 out) and propagates NaN.
    """
    return x * np.clip(x * (1.0 / 6.0) + 0.5, 0.0, 1.0)
+
+
class HardSwish(Base):
    """Test case for the HardSwish operator."""

    @staticmethod
    def export() -> None:
        """HardSwish has no attributes; compare against the reference impl."""
        node = onnx.helper.make_node(
            "HardSwish",
            inputs=["x"],
            outputs=["y"],
        )
        data = np.random.randn(3, 4, 5).astype(np.float32)
        expect(node, inputs=[data], outputs=[hardswish(data)], name="test_hardswish")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/identity.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/identity.py
new file mode 100644
index 0000000000000000000000000000000000000000..25e76f6fc9606d95d1c03811c054c295f740a985
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/identity.py
@@ -0,0 +1,93 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Identity(Base):
    """Test cases for Identity on tensors, sequences and optional values."""

    @staticmethod
    def export() -> None:
        """Identity on a plain tensor: output equals input."""
        node = onnx.helper.make_node(
            "Identity",
            inputs=["x"],
            outputs=["y"],
        )

        data = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)

        expect(node, inputs=[data], outputs=[data], name="test_identity")

    @staticmethod
    def export_sequence() -> None:
        """Identity on a sequence of tensors (a Python list of arrays)."""
        node = onnx.helper.make_node(
            "Identity",
            inputs=["x"],
            outputs=["y"],
        )

        data = [
            np.array([[[[1, 2], [3, 4]]]], dtype=np.float32),
            np.array([[[[2, 3], [1, 5]]]], dtype=np.float32),
        ]

        expect(node, inputs=[data], outputs=[data], name="test_identity_sequence")

    @staticmethod
    def export_identity_opt() -> None:
        """Identity on an optional(sequence(tensor)) value (opset 16)."""
        tensor_tp = onnx.helper.make_tensor_type_proto(
            onnx.TensorProto.FLOAT, shape=[5]
        )
        # optional(sequence(tensor(float, [5]))) type used for both ends.
        optional_tp = onnx.helper.make_optional_type_proto(
            onnx.helper.make_sequence_type_proto(tensor_tp)
        )

        node = onnx.helper.make_node(
            "Identity", inputs=["opt_in"], outputs=["opt_out"]
        )

        value = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]

        expect(
            node,
            inputs=[value],
            outputs=[value],
            name="test_identity_opt",
            opset_imports=[onnx.helper.make_opsetid("", 16)],
            input_type_protos=[optional_tp],
            output_type_protos=[optional_tp],
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/if.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/if.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd64537b9ccd79f2c1f01ec1482a0ad4ca2dd4f7
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/if.py
@@ -0,0 +1,211 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
def compute_if_outputs(x, cond):
    """Mirror the If graph in export_if_optional: empty result when cond is
    truthy (the then-branch produces an empty optional), otherwise x."""
    return [] if cond else x
+
+
class If(Base):
    """Test cases for the If operator with tensor, sequence and optional outputs."""

    @staticmethod
    def export_if() -> None:
        # Given a bool scalar input cond.
        # return constant tensor x if cond is True, otherwise return constant tensor y.

        # Each branch is a subgraph with no inputs and one float[5] output.
        then_out = onnx.helper.make_tensor_value_info(
            "then_out", onnx.TensorProto.FLOAT, [5]
        )
        else_out = onnx.helper.make_tensor_value_info(
            "else_out", onnx.TensorProto.FLOAT, [5]
        )

        x = np.array([1, 2, 3, 4, 5]).astype(np.float32)
        y = np.array([5, 4, 3, 2, 1]).astype(np.float32)

        # Each branch simply emits its constant via a Constant node.
        then_const_node = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["then_out"],
            value=onnx.numpy_helper.from_array(x),
        )

        else_const_node = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["else_out"],
            value=onnx.numpy_helper.from_array(y),
        )

        then_body = onnx.helper.make_graph(
            [then_const_node], "then_body", [], [then_out]
        )

        else_body = onnx.helper.make_graph(
            [else_const_node], "else_body", [], [else_out]
        )

        if_node = onnx.helper.make_node(
            "If",
            inputs=["cond"],
            outputs=["res"],
            then_branch=then_body,
            else_branch=else_body,
        )

        # cond is True here, so the expected result is the then-branch constant x.
        cond = np.array(1).astype(bool)
        res = x if cond else y
        expect(
            if_node,
            inputs=[cond],
            outputs=[res],
            name="test_if",
            opset_imports=[onnx.helper.make_opsetid("", 11)],
        )

    @staticmethod
    def export_if_seq() -> None:
        # Given a bool scalar input cond.
        # return constant sequence x if cond is True, otherwise return constant sequence y.

        # Branch outputs are sequences of float[5] tensors.
        then_out = onnx.helper.make_tensor_sequence_value_info(
            "then_out", onnx.TensorProto.FLOAT, shape=[5]
        )
        else_out = onnx.helper.make_tensor_sequence_value_info(
            "else_out", onnx.TensorProto.FLOAT, shape=[5]
        )

        # Sequences are represented as Python lists of arrays.
        x = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]
        y = [np.array([5, 4, 3, 2, 1]).astype(np.float32)]

        # Each branch builds its sequence: Constant -> SequenceConstruct.
        then_const_node = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["x"],
            value=onnx.numpy_helper.from_array(x[0]),
        )

        then_seq_node = onnx.helper.make_node(
            "SequenceConstruct", inputs=["x"], outputs=["then_out"]
        )

        else_const_node = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["y"],
            value=onnx.numpy_helper.from_array(y[0]),
        )

        else_seq_node = onnx.helper.make_node(
            "SequenceConstruct", inputs=["y"], outputs=["else_out"]
        )

        then_body = onnx.helper.make_graph(
            [then_const_node, then_seq_node], "then_body", [], [then_out]
        )

        else_body = onnx.helper.make_graph(
            [else_const_node, else_seq_node], "else_body", [], [else_out]
        )

        if_node = onnx.helper.make_node(
            "If",
            inputs=["cond"],
            outputs=["res"],
            then_branch=then_body,
            else_branch=else_body,
        )

        # cond is True, so the expected output is sequence x.
        cond = np.array(1).astype(bool)
        res = x if cond else y
        expect(
            if_node,
            inputs=[cond],
            outputs=[res],
            name="test_if_seq",
            opset_imports=[onnx.helper.make_opsetid("", 13)],
        )

    @staticmethod
    def export_if_optional() -> None:
        # Given a bool scalar input cond, return an empty optional sequence of
        # tensor if True, return an optional sequence with value x
        # (the input optional sequence) otherwise.

        ten_in_tp = onnx.helper.make_tensor_type_proto(
            onnx.TensorProto.FLOAT, shape=[5]
        )
        seq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)

        # then-branch output type: optional(sequence(tensor(float, [5]))).
        then_out_tensor_tp = onnx.helper.make_tensor_type_proto(
            onnx.TensorProto.FLOAT, shape=[5]
        )
        then_out_seq_tp = onnx.helper.make_sequence_type_proto(then_out_tensor_tp)
        then_out_opt_tp = onnx.helper.make_optional_type_proto(then_out_seq_tp)
        then_out = onnx.helper.make_value_info("optional_empty", then_out_opt_tp)

        # else-branch output has the same optional(sequence(tensor)) type.
        else_out_tensor_tp = onnx.helper.make_tensor_type_proto(
            onnx.TensorProto.FLOAT, shape=[5]
        )
        else_out_seq_tp = onnx.helper.make_sequence_type_proto(else_out_tensor_tp)
        else_out_opt_tp = onnx.helper.make_optional_type_proto(else_out_seq_tp)
        else_out = onnx.helper.make_value_info("else_opt", else_out_opt_tp)

        x = [np.array([1, 2, 3, 4, 5]).astype(np.float32)]
        # cond is False, so the else-branch (optional containing x) is expected.
        cond = np.array(0).astype(bool)
        res = compute_if_outputs(x, cond)

        # then-branch: an Optional node with no input yields an empty optional.
        opt_empty_in = onnx.helper.make_node(
            "Optional", inputs=[], outputs=["optional_empty"], type=seq_in_tp
        )

        then_body = onnx.helper.make_graph([opt_empty_in], "then_body", [], [then_out])

        # else-branch: Constant -> SequenceConstruct -> Optional wrapping.
        else_const_node = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["x"],
            value=onnx.numpy_helper.from_array(x[0]),
        )

        else_seq_node = onnx.helper.make_node(
            "SequenceConstruct", inputs=["x"], outputs=["else_seq"]
        )

        else_optional_seq_node = onnx.helper.make_node(
            "Optional", inputs=["else_seq"], outputs=["else_opt"]
        )

        else_body = onnx.helper.make_graph(
            [else_const_node, else_seq_node, else_optional_seq_node],
            "else_body",
            [],
            [else_out],
        )

        if_node = onnx.helper.make_node(
            "If",
            inputs=["cond"],
            outputs=["sequence"],
            then_branch=then_body,
            else_branch=else_body,
        )

        expect(
            if_node,
            inputs=[cond],
            outputs=[res],
            name="test_if_opt",
            output_type_protos=[else_out_opt_tp],
            opset_imports=[onnx.helper.make_opsetid("", 16)],
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/image_decoder.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/image_decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b24c3a26a69749d2591df5a7bf7a2f39d0d5056
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/image_decoder.py
@@ -0,0 +1,249 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import io
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import _image_decoder_data, expect
+
+
def generate_checkerboard(width: int, height: int, square_size: int) -> np.ndarray:
    """Build an RGB image of randomly colored squares.

    Only complete squares are painted; any remainder at the right/bottom edge
    (when width or height is not a multiple of square_size) stays black.
    Colors come from the global np.random state, so seed it for reproducibility.
    """
    image = np.zeros((height, width, 3), dtype=np.uint8)

    # Number of complete squares along each dimension.
    num_squares_x = width // square_size
    num_squares_y = height // square_size

    # One random color per square — the identical single randint draw the
    # per-square loop version would make.
    colors = np.random.randint(
        0, 256, size=(num_squares_y, num_squares_x, 3), dtype=np.uint8
    )

    # Expand each color into a square_size x square_size tile in one shot
    # instead of looping over squares.
    tiles = np.repeat(np.repeat(colors, square_size, axis=0), square_size, axis=1)
    image[: num_squares_y * square_size, : num_squares_x * square_size] = tiles

    return image
+
+
def _generate_test_data(
    format_: str,
    frozen_data: _image_decoder_data.ImageDecoderData,
    pixel_format: str = "RGB",
    height: int = 32,
    width: int = 32,
    tile_sz: int = 5,
) -> tuple[np.ndarray, np.ndarray]:
    """Produce (encoded_bytes, expected_decoded_image) for ImageDecoder tests.

    Encodes a random checkerboard with Pillow in the given ``format_``; when
    Pillow is unavailable, falls back to the pre-generated ``frozen_data``.
    """
    try:
        import PIL.Image  # noqa: PLC0415
    except ImportError:
        # Since pillow is not installed to generate test data for the ImageDecoder operator
        # directly use the frozen data from _image_decoder_data.py.
        return frozen_data.data, frozen_data.output
    # Fixed seed so the generated checkerboard (and thus the encoded bytes)
    # is reproducible across runs.
    np.random.seed(12345)
    image = generate_checkerboard(height, width, tile_sz)
    image_pil = PIL.Image.fromarray(image)
    with io.BytesIO() as f:
        image_pil.save(f, format=format_)
        data = f.getvalue()
        # The operator input is the raw encoded file as a flat uint8 tensor.
        data_array = np.frombuffer(data, dtype=np.uint8)
    if pixel_format == "BGR":
        # Decode as RGB, then reverse the channel axis to obtain BGR.
        output_pil = PIL.Image.open(io.BytesIO(data))
        output = np.array(output_pil)[:, :, ::-1]
    elif pixel_format == "RGB":
        output_pil = PIL.Image.open(io.BytesIO(data))
        output = np.array(output_pil)
    elif pixel_format == "Grayscale":
        # Convert to single-channel luminance and keep a trailing channel dim.
        output_pil = PIL.Image.open(io.BytesIO(data)).convert("L")
        output = np.array(output_pil)[:, :, np.newaxis]
    else:
        raise ValueError(f"Unsupported pixel format: {pixel_format}")
    return data_array, output
+
+
class ImageDecoder(Base):
    """Test cases for ImageDecoder across encodings and pixel formats."""

    @staticmethod
    def _run_case(
        format_: str,
        frozen_data: _image_decoder_data.ImageDecoderData,
        pixel_format: str,
        test_name: str,
    ) -> None:
        # Shared driver: build the node, generate (or load frozen) test data,
        # and register the expected output.
        node = onnx.helper.make_node(
            "ImageDecoder",
            inputs=["data"],
            outputs=["output"],
            pixel_format=pixel_format,
        )
        data, output = _generate_test_data(format_, frozen_data, pixel_format)
        expect(node, inputs=[data], outputs=[output], name=test_name)

    @staticmethod
    def export_image_decoder_decode_jpeg_rgb() -> None:
        ImageDecoder._run_case(
            "jpeg",
            _image_decoder_data.image_decoder_decode_jpeg_rgb,
            "RGB",
            "test_image_decoder_decode_jpeg_rgb",
        )

    @staticmethod
    def export_image_decoder_decode_jpeg_grayscale() -> None:
        ImageDecoder._run_case(
            "jpeg",
            _image_decoder_data.image_decoder_decode_jpeg_grayscale,
            "Grayscale",
            "test_image_decoder_decode_jpeg_grayscale",
        )

    @staticmethod
    def export_image_decoder_decode_jpeg_bgr() -> None:
        ImageDecoder._run_case(
            "jpeg",
            _image_decoder_data.image_decoder_decode_jpeg_bgr,
            "BGR",
            "test_image_decoder_decode_jpeg_bgr",
        )

    @staticmethod
    def export_image_decoder_decode_jpeg2k_rgb() -> None:
        ImageDecoder._run_case(
            "jpeg2000",
            _image_decoder_data.image_decoder_decode_jpeg2k_rgb,
            "RGB",
            "test_image_decoder_decode_jpeg2k_rgb",
        )

    @staticmethod
    def export_image_decoder_decode_bmp_rgb() -> None:
        ImageDecoder._run_case(
            "bmp",
            _image_decoder_data.image_decoder_decode_bmp_rgb,
            "RGB",
            "test_image_decoder_decode_bmp_rgb",
        )

    @staticmethod
    def export_image_decoder_decode_png_rgb() -> None:
        ImageDecoder._run_case(
            "png",
            _image_decoder_data.image_decoder_decode_png_rgb,
            "RGB",
            "test_image_decoder_decode_png_rgb",
        )

    @staticmethod
    def export_image_decoder_decode_tiff_rgb() -> None:
        ImageDecoder._run_case(
            "tiff",
            _image_decoder_data.image_decoder_decode_tiff_rgb,
            "RGB",
            "test_image_decoder_decode_tiff_rgb",
        )

    @staticmethod
    def export_image_decoder_decode_webp_rgb() -> None:
        ImageDecoder._run_case(
            "webp",
            _image_decoder_data.image_decoder_decode_webp_rgb,
            "RGB",
            "test_image_decoder_decode_webp_rgb",
        )

    @staticmethod
    def export_image_decoder_decode_pnm_rgb() -> None:
        ImageDecoder._run_case(
            "ppm",
            _image_decoder_data.image_decoder_decode_pnm_rgb,
            "RGB",
            "test_image_decoder_decode_pnm_rgb",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/instancenorm.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/instancenorm.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d28098f21109c867087f0f9c3adbc4738668b42
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/instancenorm.py
@@ -0,0 +1,56 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class InstanceNormalization(Base):
    """Test cases for InstanceNormalization with default and explicit epsilon."""

    @staticmethod
    def export() -> None:
        def _instancenorm_test_mode(x, s, bias, epsilon=1e-5):  # type: ignore
            # Reference: normalize each (batch, channel) slice over the
            # spatial axes, then apply the per-channel scale and bias.
            spatial_axes = tuple(range(2, x.ndim))
            mean = np.mean(x, axis=spatial_axes, keepdims=True)
            var = np.var(x, axis=spatial_axes, keepdims=True)
            # Reshape scale/bias to (C, 1, ..., 1) so they broadcast.
            broadcast_shape = (-1,) + (1,) * (x.ndim - 2)
            scale = s.reshape(broadcast_shape)
            shift = bias.reshape(broadcast_shape)
            return scale * (x - mean) / np.sqrt(var + epsilon) + shift

        # input size: (1, 2, 1, 3)
        x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)
        s = np.array([1.0, 1.5]).astype(np.float32)
        bias = np.array([0, 1]).astype(np.float32)

        node = onnx.helper.make_node(
            "InstanceNormalization",
            inputs=["x", "s", "bias"],
            outputs=["y"],
        )

        # output size: (1, 2, 1, 3)
        expect(
            node,
            inputs=[x, s, bias],
            outputs=[_instancenorm_test_mode(x, s, bias).astype(np.float32)],
            name="test_instancenorm_example",
        )

        # input size: (2, 3, 4, 5) with an explicit epsilon attribute
        x = np.random.randn(2, 3, 4, 5).astype(np.float32)
        s = np.random.randn(3).astype(np.float32)
        bias = np.random.randn(3).astype(np.float32)
        epsilon = 1e-2

        node = onnx.helper.make_node(
            "InstanceNormalization",
            inputs=["x", "s", "bias"],
            outputs=["y"],
            epsilon=epsilon,
        )

        # output size: (2, 3, 4, 5)
        expect(
            node,
            inputs=[x, s, bias],
            outputs=[_instancenorm_test_mode(x, s, bias, epsilon).astype(np.float32)],
            name="test_instancenorm_epsilon",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/isinf.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/isinf.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fdd998142180ae110424e41745148dcf1e67b74
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/isinf.py
@@ -0,0 +1,56 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class IsInf(Base):
    """Test cases for IsInf, covering the detect_positive/detect_negative attrs."""

    @staticmethod
    def export_infinity() -> None:
        """Default attributes: both +inf and -inf are detected."""
        node = onnx.helper.make_node(
            "IsInf",
            inputs=["x"],
            outputs=["y"],
        )

        data = np.array([-1.2, np.nan, np.inf, 2.8, -np.inf, np.inf], dtype=np.float32)
        expect(node, inputs=[data], outputs=[np.isinf(data)], name="test_isinf")

    @staticmethod
    def export_positive_infinity_only() -> None:
        """detect_negative=0: only +inf is flagged."""
        node = onnx.helper.make_node(
            "IsInf", inputs=["x"], outputs=["y"], detect_negative=0
        )

        data = np.array([-1.7, np.nan, np.inf, 3.6, -np.inf, np.inf], dtype=np.float32)
        expect(
            node, inputs=[data], outputs=[np.isposinf(data)], name="test_isinf_positive"
        )

    @staticmethod
    def export_negative_infinity_only() -> None:
        """detect_positive=0: only -inf is flagged."""
        node = onnx.helper.make_node(
            "IsInf", inputs=["x"], outputs=["y"], detect_positive=0
        )

        data = np.array([-1.7, np.nan, np.inf, -3.6, -np.inf, np.inf], dtype=np.float32)
        expect(
            node, inputs=[data], outputs=[np.isneginf(data)], name="test_isinf_negative"
        )

    @staticmethod
    def export_infinity_float16() -> None:
        """Same as export_infinity, but on float16 input."""
        node = onnx.helper.make_node(
            "IsInf",
            inputs=["x"],
            outputs=["y"],
        )

        data = np.array([-1.2, np.nan, np.inf, 2.8, -np.inf, np.inf], dtype=np.float16)
        expect(node, inputs=[data], outputs=[np.isinf(data)], name="test_isinf_float16")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/isnan.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/isnan.py
new file mode 100644
index 0000000000000000000000000000000000000000..74d5dff82e9bb004f78f595ef4b0392ca81bf9e4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/isnan.py
@@ -0,0 +1,36 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class IsNaN(Base):
    """Test cases for the IsNaN operator."""

    @staticmethod
    def export() -> None:
        """IsNaN on float32 input."""
        node = onnx.helper.make_node(
            "IsNaN",
            inputs=["x"],
            outputs=["y"],
        )

        data = np.array([-1.2, np.nan, np.inf, 2.8, -np.inf, np.inf], dtype=np.float32)
        expect(node, inputs=[data], outputs=[np.isnan(data)], name="test_isnan")

    @staticmethod
    def export_float16() -> None:
        """IsNaN on float16 input."""
        node = onnx.helper.make_node(
            "IsNaN",
            inputs=["x"],
            outputs=["y"],
        )

        data = np.array([-1.2, np.nan, np.inf, 2.8, -np.inf, np.inf], dtype=np.float16)
        expect(node, inputs=[data], outputs=[np.isnan(data)], name="test_isnan_float16")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/layernormalization.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/layernormalization.py
new file mode 100644
index 0000000000000000000000000000000000000000..35eac8e3c8ff8f99234e059bfd75eb4838df3232
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/layernormalization.py
@@ -0,0 +1,178 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+# Layer normalization's reference implementation
+def _layer_normalization(X, W, B, axis=-1, epsilon=1e-5):
+    X_shape = X.shape
+    X_rank = len(X_shape)
+    if axis < 0:
+        # If axis = -1 and rank of X is 4,
+        # the axis is changed to -1 + 4 = 3,
+        # which means the last axis.
+        axis = axis + X_rank
+    unsqueezed_rank = X_rank - axis
+    reduction_shape = X_shape[0:axis] + (1,) * unsqueezed_rank
+
+    # Parameter used to convert N-D tensor layer
+    # normalization to equivalent 2-D matrix operations.
+    row_number = 1
+    col_number = 1
+    for i in range(X_rank):
+        if i < axis:
+            row_number *= X_shape[i]
+        else:
+            col_number *= X_shape[i]
+
+    # After reshaping input tensor X into a matrix,
+    # layer normalization is equivalent to conducting
+    # standardization on each column vector (s.t. each
+    # column has zero mean and unit variance).
+    x_mat = np.reshape(X, (row_number, col_number))
+    # This computes mean for every x_mat's column.
+    x_mean = np.sum(x_mat, axis=1, keepdims=True) / col_number
+    x_diff = x_mat - x_mean
+    x_squared_diff = x_diff * x_diff
+    # This computes variance for every x_mat's column.
+    variance = np.sum(x_squared_diff, axis=1, keepdims=True) / col_number
+    variance_eps = variance + epsilon
+    std_dev = np.sqrt(variance_eps)
+    inv_std_dev = np.reciprocal(std_dev)
+    # Standardization step. y_mat is zero-mean and unit-variance.
+    y_mat = x_diff * inv_std_dev
+    # Apply affine transform on normalization outcome.
+    # W is linear coefficient while B is bias.
+    Y = np.reshape(y_mat, X_shape) * W + B
+    # Matrix-level operations' outputs should be reshaped
+    # to compensate the initial tensor-to-matrix reshape.
+    X_mean = np.reshape(x_mean, reduction_shape)
+    X_inv_std_dev = np.reshape(inv_std_dev, reduction_shape)
+
+    return Y, X_mean, X_inv_std_dev
+
+
def calculate_normalized_shape(X_shape, axis):
    """Return the trailing dims of X_shape that LayerNormalization reduces
    (everything from `axis` onward, with negative axes counted from the end)."""
    start = axis if axis >= 0 else axis + len(X_shape)
    return X_shape[start:]
+
+
class LayerNormalization(Base):
    """Test cases for LayerNormalization over 2-D, 3-D and 4-D inputs."""

    @staticmethod
    def export() -> None:
        X = np.random.randn(2, 3, 4, 5).astype(np.float32)

        def case(axis: int) -> None:
            # Scale/bias take the shape of the normalized (trailing) dims.
            normalized_shape = calculate_normalized_shape(X.shape, axis)
            W = np.random.randn(*normalized_shape).astype(np.float32)
            B = np.random.randn(*normalized_shape).astype(np.float32)
            Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis)

            node = onnx.helper.make_node(
                "LayerNormalization",
                inputs=["X", "W", "B"],
                outputs=["Y", "Mean", "InvStdDev"],
                axis=axis,
            )

            if axis < 0:
                name = f"test_layer_normalization_4d_axis_negative_{-axis}"
            else:
                name = f"test_layer_normalization_4d_axis{axis}"

            expect(node, inputs=[X, W, B], outputs=[Y, mean, inv_std_dev], name=name)

        # Cover every axis in both positive and negative notation.
        for i in range(len(X.shape)):
            case(i)
            case(i - len(X.shape))

    @staticmethod
    def export_default_axis() -> None:
        X = np.random.randn(2, 3, 4, 5).astype(np.float32)

        # Default axis in LayerNormalization is -1.
        normalized_shape = calculate_normalized_shape(X.shape, -1)
        W = np.random.randn(*normalized_shape).astype(np.float32)
        B = np.random.randn(*normalized_shape).astype(np.float32)
        # Axis is default to -1 in the reference implementation.
        Y, mean, inv_std_dev = _layer_normalization(X, W, B)

        # Not specifying axis attribute means -1.
        node = onnx.helper.make_node(
            "LayerNormalization",
            inputs=["X", "W", "B"],
            outputs=["Y", "Mean", "InvStdDev"],
        )

        expect(
            node,
            inputs=[X, W, B],
            outputs=[Y, mean, inv_std_dev],
            name="test_layer_normalization_default_axis",
        )

    @staticmethod
    def export2d() -> None:
        X = np.random.randn(3, 4).astype(np.float32)

        def case(axis: int) -> None:
            # Same driver as export(), but on a rank-2 input.
            normalized_shape = calculate_normalized_shape(X.shape, axis)
            W = np.random.randn(*normalized_shape).astype(np.float32)
            B = np.random.randn(*normalized_shape).astype(np.float32)
            Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis=axis)

            node = onnx.helper.make_node(
                "LayerNormalization",
                inputs=["X", "W", "B"],
                outputs=["Y", "Mean", "InvStdDev"],
                axis=axis,
            )

            if axis < 0:
                name = f"test_layer_normalization_2d_axis_negative_{-axis}"
            else:
                name = f"test_layer_normalization_2d_axis{axis}"

            expect(node, inputs=[X, W, B], outputs=[Y, mean, inv_std_dev], name=name)

        for i in range(len(X.shape)):
            case(i)
            case(i - len(X.shape))

    @staticmethod
    def export3d_epsilon() -> None:
        # A large epsilon so its effect is visible in the expected outputs.
        epsilon = 1e-1
        X = np.random.randn(2, 3, 5).astype(np.float32)

        def case(axis: int) -> None:
            # Rank-3 input with an explicit epsilon attribute on the node.
            normalized_shape = calculate_normalized_shape(X.shape, axis)
            W = np.random.randn(*normalized_shape).astype(np.float32)
            B = np.random.randn(*normalized_shape).astype(np.float32)
            Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis, epsilon)
            node = onnx.helper.make_node(
                "LayerNormalization",
                inputs=["X", "W", "B"],
                outputs=["Y", "Mean", "InvStdDev"],
                axis=axis,
                epsilon=epsilon,
            )

            if axis < 0:
                name = f"test_layer_normalization_3d_axis_negative_{-axis}_epsilon"
            else:
                name = f"test_layer_normalization_3d_axis{axis}_epsilon"

            expect(node, inputs=[X, W, B], outputs=[Y, mean, inv_std_dev], name=name)

        for i in range(len(X.shape)):
            case(i)
            case(i - len(X.shape))
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/leakyrelu.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/leakyrelu.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb4d2f11bb0760826061b9d690d94168eca6bdce
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/leakyrelu.py
@@ -0,0 +1,39 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class LeakyRelu(Base):
    @staticmethod
    def export() -> None:
        """LeakyRelu node test with an explicit alpha=0.1 attribute."""
        node = onnx.helper.make_node(
            "LeakyRelu", inputs=["x"], outputs=["y"], alpha=0.1
        )

        def leaky(v: np.ndarray) -> np.ndarray:
            # positive part + 0.1 * negative part, written with clip
            return np.clip(v, 0, np.inf) + np.clip(v, -np.inf, 0) * 0.1

        example = np.array([-1, 0, 1]).astype(np.float32)
        # expected output [-0.1, 0., 1.]
        expect(
            node, inputs=[example], outputs=[leaky(example)], name="test_leakyrelu_example"
        )

        data = np.random.randn(3, 4, 5).astype(np.float32)
        expect(node, inputs=[data], outputs=[leaky(data)], name="test_leakyrelu")

    @staticmethod
    def export_leakyrelu_default() -> None:
        """LeakyRelu with no alpha attribute; the reference uses alpha=0.01."""
        default_alpha = 0.01
        node = onnx.helper.make_node(
            "LeakyRelu",
            inputs=["x"],
            outputs=["y"],
        )
        x = np.random.randn(3, 4, 5).astype(np.float32)
        y = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * default_alpha
        expect(node, inputs=[x], outputs=[y], name="test_leakyrelu_default")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/less.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/less.py
new file mode 100644
index 0000000000000000000000000000000000000000..184accb14a200a1c6094b87fa9d3bbdee59dca26
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/less.py
@@ -0,0 +1,68 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Less(Base):
    @staticmethod
    def export() -> None:
        """Element-wise Less over identically shaped inputs of several dtypes."""
        node = onnx.helper.make_node(
            "Less",
            inputs=["x", "y"],
            outputs=["less"],
        )

        x = np.random.randn(3, 4, 5).astype(np.float32)
        y = np.random.randn(3, 4, 5).astype(np.float32)
        expect(node, inputs=[x, y], outputs=[np.less(x, y)], name="test_less")

        # Signed integer inputs: truncate random normals to int8/int16.
        for dtype, suffix in ((np.int8, "int8"), (np.int16, "int16")):
            x = np.random.randn(3, 4, 5).astype(dtype)
            y = np.random.randn(3, 4, 5).astype(dtype)
            expect(
                node, inputs=[x, y], outputs=[np.less(x, y)], name=f"test_less_{suffix}"
            )

        # Unsigned integer inputs: draw uniformly from [0, 24).
        for dtype, suffix in (
            (np.uint8, "uint8"),
            (np.uint16, "uint16"),
            (np.uint32, "uint32"),
            (np.uint64, "uint64"),
        ):
            x = np.random.randint(24, size=(3, 4, 5), dtype=dtype)
            y = np.random.randint(24, size=(3, 4, 5), dtype=dtype)
            expect(
                node, inputs=[x, y], outputs=[np.less(x, y)], name=f"test_less_{suffix}"
            )

    @staticmethod
    def export_less_broadcast() -> None:
        """Less with NumPy-style broadcasting of a rank-1 second input."""
        node = onnx.helper.make_node(
            "Less",
            inputs=["x", "y"],
            outputs=["less"],
        )

        x = np.random.randn(3, 4, 5).astype(np.float32)
        y = np.random.randn(5).astype(np.float32)
        expect(node, inputs=[x, y], outputs=[np.less(x, y)], name="test_less_bcast")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/less_equal.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/less_equal.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e4d35f66272e97cf60f6aa7d694724354806159
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/less_equal.py
@@ -0,0 +1,68 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Less(Base):
    # NOTE(review): this module tests LessOrEqual but keeps the class name
    # "Less"; preserved as-is because the test loader keys off class names.
    @staticmethod
    def export() -> None:
        """Element-wise LessOrEqual over identically shaped inputs of several dtypes."""
        node = onnx.helper.make_node(
            "LessOrEqual",
            inputs=["x", "y"],
            outputs=["less_equal"],
        )

        x = np.random.randn(3, 4, 5).astype(np.float32)
        y = np.random.randn(3, 4, 5).astype(np.float32)
        expect(node, inputs=[x, y], outputs=[np.less_equal(x, y)], name="test_less_equal")

        # Signed integer inputs: truncate random normals to int8/int16.
        for dtype, suffix in ((np.int8, "int8"), (np.int16, "int16")):
            x = np.random.randn(3, 4, 5).astype(dtype)
            y = np.random.randn(3, 4, 5).astype(dtype)
            expect(
                node,
                inputs=[x, y],
                outputs=[np.less_equal(x, y)],
                name=f"test_less_equal_{suffix}",
            )

        # Unsigned integer inputs: draw uniformly from [0, 24).
        for dtype, suffix in (
            (np.uint8, "uint8"),
            (np.uint16, "uint16"),
            (np.uint32, "uint32"),
            (np.uint64, "uint64"),
        ):
            x = np.random.randint(24, size=(3, 4, 5), dtype=dtype)
            y = np.random.randint(24, size=(3, 4, 5), dtype=dtype)
            expect(
                node,
                inputs=[x, y],
                outputs=[np.less_equal(x, y)],
                name=f"test_less_equal_{suffix}",
            )

    @staticmethod
    def export_less_broadcast() -> None:
        """LessOrEqual with NumPy-style broadcasting of a rank-1 second input."""
        node = onnx.helper.make_node(
            "LessOrEqual",
            inputs=["x", "y"],
            outputs=["less_equal"],
        )

        x = np.random.randn(3, 4, 5).astype(np.float32)
        y = np.random.randn(5).astype(np.float32)
        expect(
            node, inputs=[x, y], outputs=[np.less_equal(x, y)], name="test_less_equal_bcast"
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/log.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/log.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b4d9544b87fed6d8c902a4398902f14fc803a57
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/log.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Log(Base):
    @staticmethod
    def export() -> None:
        """Element-wise natural logarithm."""
        node = onnx.helper.make_node(
            "Log",
            inputs=["x"],
            outputs=["y"],
        )

        example = np.array([1, 10]).astype(np.float32)
        # expected output [0., 2.30258512]
        expect(node, inputs=[example], outputs=[np.log(example)], name="test_log_example")

        # exp() keeps the random inputs strictly positive, so log is defined.
        data = np.exp(np.random.randn(3, 4, 5).astype(np.float32))
        expect(node, inputs=[data], outputs=[np.log(data)], name="test_log")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/logsoftmax.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/logsoftmax.py
new file mode 100644
index 0000000000000000000000000000000000000000..efc2e6f9268d2b6a98dd8e8f4745355218fb90a1
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/logsoftmax.py
@@ -0,0 +1,92 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
def logsoftmax(x: np.ndarray, axis: int = -1) -> np.ndarray:
    """Numerically stable log-softmax along ``axis``.

    Subtracts the per-slice maximum before exponentiating so that large
    inputs do not overflow.
    """
    shifted = x - np.max(x, axis=axis, keepdims=True)
    return shifted - np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))
+
+
class LogSoftmax(Base):
    @staticmethod
    def export() -> None:
        """Single-row example using the default axis (-1)."""
        node = onnx.helper.make_node(
            "LogSoftmax",
            inputs=["x"],
            outputs=["y"],
        )
        x = np.array([[-1, 0, 1]]).astype(np.float32)
        # expected output
        # [[-2.4076061 -1.407606  -0.407606 ]]
        expect(
            node, inputs=[x], outputs=[logsoftmax(x)], name="test_logsoftmax_example_1"
        )

    @staticmethod
    def export_logsoftmax_axis() -> None:
        """Axis variants, including large inputs that require the max-shift."""
        x = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]).astype(np.float32)
        # expected output
        # [[-3.4401896  -2.4401896  -1.4401896  -0.44018966]
        # [-3.4401896  -2.4401896  -1.4401896  -0.44018966]]
        node = onnx.helper.make_node(
            "LogSoftmax",
            inputs=["x"],
            outputs=["y"],
        )
        expect(
            node, inputs=[x], outputs=[logsoftmax(x)], name="test_logsoftmax_large_number"
        )

        x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
        for axis, name in (
            (0, "test_logsoftmax_axis_0"),
            (1, "test_logsoftmax_axis_1"),
            (2, "test_logsoftmax_axis_2"),
            (-1, "test_logsoftmax_negative_axis"),
        ):
            node = onnx.helper.make_node(
                "LogSoftmax",
                inputs=["x"],
                outputs=["y"],
                axis=axis,
            )
            y = logsoftmax(x, axis=axis)
            expect(node, inputs=[x], outputs=[y], name=name)

        # default axis is -1, so the expected output from the axis=-1 case
        # above is reused unchanged
        node = onnx.helper.make_node(
            "LogSoftmax",
            inputs=["x"],
            outputs=["y"],
        )
        expect(node, inputs=[x], outputs=[y], name="test_logsoftmax_default_axis")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/loop.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/loop.py
new file mode 100644
index 0000000000000000000000000000000000000000..0616be7d97152957036357dfb464d5068a4e9cb6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/loop.py
@@ -0,0 +1,459 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from typing import Any
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
def compute_loop_outputs(x, seq, trip_count):
    """Reference result for the sequence-building Loop tests.

    Args:
        x: 1-D array whose growing prefixes ``x[:1], x[:2], ...`` are appended.
        seq: initial sequence of tensors, or ``None`` for an empty sequence.
        trip_count: number of loop iterations (anything ``range`` accepts).

    Returns:
        A new list: a copy of ``seq`` extended with ``trip_count`` prefixes of
        ``x``.

    The original implementation extended ``seq`` in place with ``+=``, mutating
    the caller's list, and re-checked ``seq is None`` on every iteration; this
    version never mutates its argument and hoists the check out of the loop.
    """
    result = [] if seq is None else list(seq)
    for i in range(trip_count):
        result.append(x[: i + 1])
    return result
+
+
class Loop(Base):
    """Backend-test cases for the ONNX Loop operator at opsets 11, 13 and 16.

    Each export builds a body subgraph by hand with onnx.helper, wraps it in a
    Loop node, and compares against expected values computed in NumPy.  The
    node ordering inside each body graph is deliberate and should not be
    changed without re-checking the graphs' topological validity.
    """

    @staticmethod
    def export_loop_11() -> None:
        """Opset-11 Loop: running sum with a per-iteration scan output.

        Given a tensor x of values [x1, ..., xN], and initial tensor y,
        sum up its elements using a scan, returning the final state
        (y+x1+x2+...+xN) as well as the scan_output
        [y+x1, y+x1+x2, ..., y+x1+x2+...+xN].
        """
        # Value infos for the body graph's inputs/outputs.  cond and
        # iter_count are scalars (shape []); the carried state y is shape [1].
        y_in = onnx.helper.make_tensor_value_info("y_in", onnx.TensorProto.FLOAT, [1])
        y_out = onnx.helper.make_tensor_value_info("y_out", onnx.TensorProto.FLOAT, [1])
        scan_out = onnx.helper.make_tensor_value_info(
            "scan_out", onnx.TensorProto.FLOAT, [1]
        )
        cond_in = onnx.helper.make_tensor_value_info(
            "cond_in", onnx.TensorProto.BOOL, []
        )
        cond_out = onnx.helper.make_tensor_value_info(
            "cond_out", onnx.TensorProto.BOOL, []
        )
        iter_count = onnx.helper.make_tensor_value_info(
            "iter_count", onnx.TensorProto.INT64, []
        )

        x = np.array([1, 2, 3, 4, 5]).astype(np.float32)
        y = np.array([-2]).astype(np.float32)

        # x is baked into the body graph as a Constant rather than passed in.
        x_const_node = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["x"],
            value=onnx.helper.make_tensor(
                name="const_tensor_x",
                data_type=onnx.TensorProto.FLOAT,
                dims=x.shape,
                vals=x.flatten().astype(float),
            ),
        )

        one_const_node = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["one"],
            value=onnx.helper.make_tensor(
                name="const_tensor_one",
                data_type=onnx.TensorProto.INT64,
                dims=(),
                vals=[1],
            ),
        )

        # Body computes the slice bounds [iter_count, iter_count + 1) so each
        # iteration adds exactly one element of x to the carried state.
        i_add_node = onnx.helper.make_node(
            "Add", inputs=["iter_count", "one"], outputs=["end"]
        )

        # Opset 11: Unsqueeze takes `axes` as an attribute (not an input).
        start_unsqueeze_node = onnx.helper.make_node(
            "Unsqueeze", inputs=["iter_count"], outputs=["slice_start"], axes=[0]
        )

        end_unsqueeze_node = onnx.helper.make_node(
            "Unsqueeze", inputs=["end"], outputs=["slice_end"], axes=[0]
        )

        slice_node = onnx.helper.make_node(
            "Slice", inputs=["x", "slice_start", "slice_end"], outputs=["slice_out"]
        )

        y_add_node = onnx.helper.make_node(
            "Add", inputs=["y_in", "slice_out"], outputs=["y_out"]
        )

        # cond is passed through unchanged: the loop runs for trip_count
        # iterations.
        identity_node = onnx.helper.make_node(
            "Identity", inputs=["cond_in"], outputs=["cond_out"]
        )

        # Each iteration's y_out is also emitted as the scan output.
        scan_identity_node = onnx.helper.make_node(
            "Identity", inputs=["y_out"], outputs=["scan_out"]
        )

        loop_body = onnx.helper.make_graph(
            [
                identity_node,
                x_const_node,
                one_const_node,
                i_add_node,
                start_unsqueeze_node,
                end_unsqueeze_node,
                slice_node,
                y_add_node,
                scan_identity_node,
            ],
            "loop_body",
            [iter_count, cond_in, y_in],
            [cond_out, y_out, scan_out],
        )

        node = onnx.helper.make_node(
            "Loop",
            inputs=["trip_count", "cond", "y"],
            outputs=["res_y", "res_scan"],
            body=loop_body,
        )

        # Expected values: partial sums of x starting from y = -2.
        trip_count = np.array(5).astype(np.int64)
        res_y = np.array([13]).astype(np.float32)
        cond = np.array(1).astype(bool)
        res_scan = np.array([-1, 1, 4, 8, 13]).astype(np.float32).reshape((5, 1))
        expect(
            node,
            inputs=[trip_count, cond, y],
            outputs=[res_y, res_scan],
            name="test_loop11",
            opset_imports=[onnx.helper.make_opsetid("", 11)],
        )

    @staticmethod
    def export_loop_13() -> None:
        """Opset-13 Loop carrying a tensor sequence as loop state.

        Given a tensor x of values [x1, ..., xN], return a sequence of
        tensors of [[x1], [x1, x2], ..., [x1, ..., xN]].
        """
        seq_in = onnx.helper.make_tensor_sequence_value_info(
            "seq_in", onnx.TensorProto.FLOAT, None
        )
        seq_out = onnx.helper.make_tensor_sequence_value_info(
            "seq_out", onnx.TensorProto.FLOAT, None
        )
        cond_in = onnx.helper.make_tensor_value_info(
            "cond_in", onnx.TensorProto.BOOL, []
        )
        cond_out = onnx.helper.make_tensor_value_info(
            "cond_out", onnx.TensorProto.BOOL, []
        )
        iter_count = onnx.helper.make_tensor_value_info(
            "iter_count", onnx.TensorProto.INT64, []
        )

        x = np.array([1, 2, 3, 4, 5]).astype(np.float32)

        x_const_node = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["x"],
            value=onnx.helper.make_tensor(
                name="const_tensor_x",
                data_type=onnx.TensorProto.FLOAT,
                dims=x.shape,
                vals=x.flatten().astype(float),
            ),
        )

        one_const_node = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["one"],
            value=onnx.helper.make_tensor(
                name="const_tensor_one",
                data_type=onnx.TensorProto.INT64,
                dims=(),
                vals=[1],
            ),
        )

        # Slice always starts at 0 here; each iteration takes a longer prefix.
        zero_const_node = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["slice_start"],
            value=onnx.helper.make_tensor(
                name="const_tensor_zero",
                data_type=onnx.TensorProto.INT64,
                dims=(1,),
                vals=[0],
            ),
        )

        axes_node = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["axes"],
            value=onnx.helper.make_tensor(
                name="const_tensor_axes",
                data_type=onnx.TensorProto.INT64,
                dims=(),
                vals=[0],
            ),
        )

        add_node = onnx.helper.make_node(
            "Add", inputs=["iter_count", "one"], outputs=["end"]
        )

        # Opset 13: Unsqueeze takes `axes` as a second input (not an attribute).
        end_unsqueeze_node = onnx.helper.make_node(
            "Unsqueeze", inputs=["end", "axes"], outputs=["slice_end"]
        )

        slice_node = onnx.helper.make_node(
            "Slice", inputs=["x", "slice_start", "slice_end"], outputs=["slice_out"]
        )

        # Append the prefix x[0:iter_count+1] to the carried sequence.
        insert_node = onnx.helper.make_node(
            "SequenceInsert", inputs=["seq_in", "slice_out"], outputs=["seq_out"]
        )

        identity_node = onnx.helper.make_node(
            "Identity", inputs=["cond_in"], outputs=["cond_out"]
        )

        loop_body = onnx.helper.make_graph(
            [
                identity_node,
                x_const_node,
                one_const_node,
                zero_const_node,
                add_node,
                axes_node,
                end_unsqueeze_node,
                slice_node,
                insert_node,
            ],
            "loop_body",
            [iter_count, cond_in, seq_in],
            [cond_out, seq_out],
        )

        node = onnx.helper.make_node(
            "Loop",
            inputs=["trip_count", "cond", "seq_empty"],
            outputs=["seq_res"],
            body=loop_body,
        )

        trip_count = np.array(5).astype(np.int64)
        seq_empty: list[Any] = []
        # Expected: the i-th element of the result sequence is x[:i].
        seq_res = [x[: int(i)] for i in x]
        cond = np.array(1).astype(bool)
        expect(
            node,
            inputs=[trip_count, cond, seq_empty],
            outputs=[seq_res],
            name="test_loop13_seq",
            opset_imports=[onnx.helper.make_opsetid("", 13)],
            input_type_protos=[
                onnx.helper.make_tensor_type_proto(
                    onnx.TensorProto.INT64, trip_count.shape
                ),
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.BOOL, cond.shape),
                onnx.helper.make_sequence_type_proto(
                    onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [])
                ),
            ],
        )

    @staticmethod
    def export_loop_16_none() -> None:
        """Opset-16 Loop whose state is an optional(sequence(tensor)).

        Given a tensor sequence of values [x1, ..., xN], and an initial
        optional sequence of tensors [x0], return a concatenated sequence of
        tensors of [x0, [x1], [x1, x2], ..., [x1, ..., xN]].
        """
        # Optional-of-sequence-of-float type for the carried state.
        ten_in_tp = onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [])
        seq_in_tp = onnx.helper.make_sequence_type_proto(ten_in_tp)
        opt_in_tp = onnx.helper.make_optional_type_proto(seq_in_tp)
        opt_in = onnx.helper.make_value_info("opt_seq_in", opt_in_tp)
        seq_out = onnx.helper.make_tensor_sequence_value_info(
            "seq_out", onnx.TensorProto.FLOAT, []
        )
        cond_in = onnx.helper.make_tensor_value_info(
            "cond_in", onnx.TensorProto.BOOL, []
        )
        cond_out = onnx.helper.make_tensor_value_info(
            "cond_out", onnx.TensorProto.BOOL, []
        )
        iter_count = onnx.helper.make_tensor_value_info(
            "iter_count", onnx.TensorProto.INT64, []
        )

        x0 = np.array(0).astype(np.float32)
        x = np.array([1, 2, 3, 4, 5]).astype(np.float32)

        # If/else below materializes the sequence: a fresh [0.0] sequence when
        # the optional is empty, otherwise the wrapped sequence itself.
        optional_has_elem_node = onnx.helper.make_node(
            "OptionalHasElement", inputs=["opt_seq_in"], outputs=["optional_has_elem"]
        )

        optional_is_none = onnx.helper.make_node(
            "Not", inputs=["optional_has_elem"], outputs=["optional_is_none"]
        )

        optional_get_elem = onnx.helper.make_node(
            "OptionalGetElement", inputs=["opt_seq_in"], outputs=["seq_in"]
        )

        constant_in = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["constant_in"],
            value=onnx.helper.make_tensor(
                name="const_tensor", data_type=onnx.TensorProto.FLOAT, dims=(), vals=[0]
            ),
        )

        seq_const_in = onnx.helper.make_node(
            "SequenceConstruct", inputs=["constant_in"], outputs=["init_seq_in"]
        )

        then_seq_out = onnx.helper.make_tensor_sequence_value_info(
            "init_seq_in", onnx.TensorProto.FLOAT, []
        )
        then_body = onnx.helper.make_graph(
            [constant_in, seq_const_in], "then_body", [], [then_seq_out]
        )

        else_seq_out = onnx.helper.make_tensor_sequence_value_info(
            "seq_in", onnx.TensorProto.FLOAT, []
        )
        else_body = onnx.helper.make_graph(
            [optional_get_elem], "else_body", [], [else_seq_out]
        )

        if_node = onnx.helper.make_node(
            "If",
            inputs=["optional_is_none"],
            outputs=["sequence"],
            then_branch=then_body,
            else_branch=else_body,
        )

        x_const_node = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["x"],
            value=onnx.helper.make_tensor(
                name="const_tensor_x",
                data_type=onnx.TensorProto.FLOAT,
                dims=x.shape,
                vals=x.flatten().astype(float),
            ),
        )

        one_const_node = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["one"],
            value=onnx.helper.make_tensor(
                name="const_tensor_one",
                data_type=onnx.TensorProto.INT64,
                dims=(),
                vals=[1],
            ),
        )

        zero_const_node = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["slice_start"],
            value=onnx.helper.make_tensor(
                name="const_tensor_zero",
                data_type=onnx.TensorProto.INT64,
                dims=(1,),
                vals=[0],
            ),
        )

        axes_node = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["axes"],
            value=onnx.helper.make_tensor(
                name="const_tensor_axes",
                data_type=onnx.TensorProto.INT64,
                dims=(),
                vals=[0],
            ),
        )

        add_node = onnx.helper.make_node(
            "Add", inputs=["iter_count", "one"], outputs=["end"]
        )

        # Opset 13+ Unsqueeze: `axes` supplied as an input.
        end_unsqueeze_node = onnx.helper.make_node(
            "Unsqueeze", inputs=["end", "axes"], outputs=["slice_end"]
        )

        slice_node = onnx.helper.make_node(
            "Slice", inputs=["x", "slice_start", "slice_end"], outputs=["slice_out"]
        )

        # Append the prefix to the sequence produced by the If node above.
        insert_node = onnx.helper.make_node(
            "SequenceInsert", inputs=["sequence", "slice_out"], outputs=["seq_out"]
        )

        identity_node = onnx.helper.make_node(
            "Identity", inputs=["cond_in"], outputs=["cond_out"]
        )

        loop_body = onnx.helper.make_graph(
            [
                identity_node,
                optional_has_elem_node,
                optional_is_none,
                if_node,
                x_const_node,
                one_const_node,
                zero_const_node,
                add_node,
                axes_node,
                end_unsqueeze_node,
                slice_node,
                insert_node,
            ],
            "loop_body",
            [iter_count, cond_in, opt_in],
            [cond_out, seq_out],
        )

        node = onnx.helper.make_node(
            "Loop",
            inputs=["trip_count", "cond", "opt_seq"],
            outputs=["seq_res"],
            body=loop_body,
        )

        trip_count = np.array(5).astype(np.int64)
        cond = np.array(1).astype(bool)
        # Expected result computed by the module-level reference helper.
        seq_res = compute_loop_outputs(x, [x0], trip_count)
        opt_seq_in: list[Any] = [x0]
        expect(
            node,
            inputs=[trip_count, cond, opt_seq_in],
            outputs=[seq_res],
            name="test_loop16_seq_none",
            opset_imports=[onnx.helper.make_opsetid("", 16)],
            input_type_protos=[
                onnx.helper.make_tensor_type_proto(
                    onnx.TensorProto.INT64, trip_count.shape
                ),
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.BOOL, cond.shape),
                opt_in_tp,
            ],
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/lpnormalization.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/lpnormalization.py
new file mode 100644
index 0000000000000000000000000000000000000000..744c68dc1a2ee1019b158776aa06269d7e9de3e9
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/lpnormalization.py
@@ -0,0 +1,79 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class LpNormalization(Base):
    @staticmethod
    def export_l2normalization_axis_0() -> None:
        """L2 normalization across axis 0 of a rank-3 input."""
        node = onnx.helper.make_node(
            "LpNormalization", inputs=["x"], outputs=["y"], axis=0, p=2
        )
        x = np.array(
            [[[1.0, 2.0, 2.0], [3.0, 4.0, 0.0]], [[0.0, 5.0, 5.0], [6.0, 8.0, 0.0]]],
            dtype=np.float32,
        )
        denom = np.sqrt(np.sum(x * x, axis=0, keepdims=True))
        expect(node, inputs=[x], outputs=[x / denom], name="test_l2normalization_axis_0")

    @staticmethod
    def export_l2normalization_axis_1() -> None:
        """L2 normalization across axis 1 of a rank-2 input."""
        node = onnx.helper.make_node(
            "LpNormalization", inputs=["x"], outputs=["y"], axis=1, p=2
        )
        x = np.array([[3.0, 4.0], [6.0, 8.0]], dtype=np.float32)
        denom = np.sqrt(np.sum(x * x, axis=1, keepdims=True))
        expect(node, inputs=[x], outputs=[x / denom], name="test_l2normalization_axis_1")

    @staticmethod
    def export_l1normalization_axis_0() -> None:
        """L1 normalization of a rank-1 input (axis 0)."""
        node = onnx.helper.make_node(
            "LpNormalization", inputs=["x"], outputs=["y"], axis=0, p=1
        )
        x = np.array([3.0, 4.0], dtype=np.float32)
        denom = np.sum(np.abs(x), axis=0, keepdims=True)
        expect(node, inputs=[x], outputs=[x / denom], name="test_l1normalization_axis_0")

    @staticmethod
    def export_l1normalization_axis_1() -> None:
        """L1 normalization across axis 1 of a rank-2 input."""
        node = onnx.helper.make_node(
            "LpNormalization", inputs=["x"], outputs=["y"], axis=1, p=1
        )
        x = np.array([[3.0, 4.0], [6.0, 8.0]], dtype=np.float32)
        denom = np.sum(np.abs(x), axis=1, keepdims=True)
        expect(node, inputs=[x], outputs=[x / denom], name="test_l1normalization_axis_1")

    @staticmethod
    def export_l1normalization_axis_last() -> None:
        """L1 normalization across the last axis (axis=-1)."""
        node = onnx.helper.make_node(
            "LpNormalization", inputs=["x"], outputs=["y"], axis=-1, p=1
        )
        x = np.array(
            [[[1.0, 2.0, 2.0], [3.0, 4.0, 0.0]], [[0.0, 5.0, 5.0], [6.0, 8.0, 0.0]]],
            dtype=np.float32,
        )
        denom = np.sum(np.abs(x), axis=-1, keepdims=True)
        expect(
            node, inputs=[x], outputs=[x / denom], name="test_l1normalization_axis_last"
        )

    @staticmethod
    def export_default() -> None:
        """Default attributes: L2 norm along the last axis."""
        node = onnx.helper.make_node("LpNormalization", inputs=["x"], outputs=["y"])
        x = np.array(
            [[[1.0, 2.0, 2.0], [3.0, 4.0, 0.0]], [[0.0, 5.0, 5.0], [6.0, 8.0, 0.0]]],
            dtype=np.float32,
        )
        denom = np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
        expect(node, inputs=[x], outputs=[x / denom], name="test_lpnormalization_default")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/lppool.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/lppool.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1efbafe818d723c85b24fe45c301a83e7bbfb92
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/lppool.py
@@ -0,0 +1,298 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.reference.ops.op_pool_common import (
+    get_output_shape_auto_pad,
+    get_output_shape_explicit_padding,
+    get_pad_shape,
+    pool,
+)
+
+
class LpPool(Base):
    """Test cases for the LpPool operator (pooling with the p-norm).

    Expected outputs are computed with the reference helpers from
    ``onnx.reference.ops.op_pool_common`` using pooling mode ``"LPPOOL"``.
    """

    @staticmethod
    def export_lppool_1d_default() -> None:
        """input_shape: [1, 3, 32]
        output_shape: [1, 3, 31]
        """
        p = 3
        kernel_shape = [2]
        strides = [1]
        node = onnx.helper.make_node(
            "LpPool",
            inputs=["x"],
            outputs=["y"],
            kernel_shape=kernel_shape,
            strides=strides,
            p=p,
        )
        x = np.random.randn(1, 3, 32).astype(np.float32)
        x_shape = np.shape(x)
        # No explicit pads: the output shape is derived from the spatial
        # dims (x_shape[2:]) only, and the input is pooled as-is.
        pads = None
        out_shape, _ = get_output_shape_explicit_padding(
            pads, x_shape[2:], kernel_shape, strides
        )
        padded = x
        y = pool(padded, x_shape, kernel_shape, strides, out_shape, "LPPOOL", p=p)

        expect(node, inputs=[x], outputs=[y], name="test_lppool_1d_default")

    @staticmethod
    def export_lppool_2d_default() -> None:
        """input_shape: [1, 3, 32, 32]
        output_shape: [1, 3, 31, 31]
        """
        p = 4
        node = onnx.helper.make_node(
            "LpPool",
            inputs=["x"],
            outputs=["y"],
            kernel_shape=[2, 2],
            p=p,
        )
        x = np.random.randn(1, 3, 32, 32).astype(np.float32)
        x_shape = np.shape(x)
        pads = None
        # Mirror the node's kernel_shape and the operator's default stride
        # of 1 for the reference computation.
        kernel_shape = (2, 2)
        strides = (1, 1)
        out_shape, _ = get_output_shape_explicit_padding(
            pads, x_shape[2:], kernel_shape, strides
        )
        padded = x
        y = pool(padded, x_shape, kernel_shape, strides, out_shape, "LPPOOL", p=p)

        expect(node, inputs=[x], outputs=[y], name="test_lppool_2d_default")

    @staticmethod
    def export_lppool_3d_default() -> None:
        """input_shape: [1, 3, 32, 32, 32]
        output_shape: [1, 3, 31, 31, 31]
        """
        p = 3
        node = onnx.helper.make_node(
            "LpPool",
            inputs=["x"],
            outputs=["y"],
            kernel_shape=[2, 2, 2],
            p=p,
        )
        x = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)
        x_shape = np.shape(x)
        pads = None
        kernel_shape = [2, 2, 2]
        strides = [1, 1, 1]
        out_shape, _ = get_output_shape_explicit_padding(
            pads, x_shape[2:], kernel_shape, strides
        )
        padded = x
        y = pool(padded, x_shape, kernel_shape, strides, out_shape, "LPPOOL", p=p)

        expect(node, inputs=[x], outputs=[y], name="test_lppool_3d_default")

    @staticmethod
    def export_lppool_2d_same_upper() -> None:
        """input_shape: [1, 3, 32, 32]
        output_shape: [1, 3, 32, 32]
        pad_shape: [1, 1] -> [0, 1, 0, 1] by axis
        """
        p = 2
        node = onnx.helper.make_node(
            "LpPool",
            inputs=["x"],
            outputs=["y"],
            kernel_shape=[2, 2],
            auto_pad="SAME_UPPER",
            p=p,
        )
        x = np.random.randn(1, 3, 32, 32).astype(np.float32)
        x_shape = np.shape(x)
        kernel_shape = (2, 2)
        strides = (1, 1)
        out_shape = get_output_shape_auto_pad(
            "SAME_UPPER", x_shape[2:], kernel_shape, strides
        )
        pad_shape = get_pad_shape(
            "SAME_UPPER", x_shape[2:], kernel_shape, strides, out_shape
        )
        # SAME_UPPER: any odd leftover padding goes to the end (bottom/right).
        pad_top = pad_shape[0] // 2
        pad_bottom = pad_shape[0] - pad_top
        pad_left = pad_shape[1] // 2
        pad_right = pad_shape[1] - pad_left
        padded = np.pad(
            x,
            ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),
            mode="constant",
            constant_values=0,
        )
        # ONNX pads ordering: [x1_begin, x2_begin, x1_end, x2_end].
        pads = [pad_top, pad_left, pad_bottom, pad_right]
        y = pool(
            padded, x_shape, kernel_shape, strides, out_shape, "LPPOOL", pads, pads, p=p
        )

        expect(node, inputs=[x], outputs=[y], name="test_lppool_2d_same_upper")

    @staticmethod
    def export_lppool_2d_same_lower() -> None:
        """input_shape: [1, 3, 32, 32]
        output_shape: [1, 3, 32, 32]
        pad_shape: [1, 1] -> [1, 0, 1, 0] by axis
        """
        p = 4
        node = onnx.helper.make_node(
            "LpPool",
            inputs=["x"],
            outputs=["y"],
            kernel_shape=[2, 2],
            auto_pad="SAME_LOWER",
            p=p,
        )
        x = np.random.randn(1, 3, 32, 32).astype(np.float32)
        x_shape = np.shape(x)
        kernel_shape = (2, 2)
        strides = (1, 1)
        out_shape = get_output_shape_auto_pad(
            "SAME_LOWER", x_shape[2:], kernel_shape, strides
        )
        pad_shape = get_pad_shape(
            "SAME_LOWER", x_shape[2:], kernel_shape, strides, out_shape
        )
        # SAME_LOWER: any odd leftover padding goes to the start (top/left).
        pad_bottom = pad_shape[0] // 2
        pad_top = pad_shape[0] - pad_bottom
        pad_right = pad_shape[1] // 2
        pad_left = pad_shape[1] - pad_right
        padded = np.pad(
            x,
            ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),
            mode="constant",
            constant_values=0,
        )
        pads = [pad_top, pad_left, pad_bottom, pad_right]
        y = pool(
            padded, x_shape, kernel_shape, strides, out_shape, "LPPOOL", pads, pads, p=p
        )

        expect(node, inputs=[x], outputs=[y], name="test_lppool_2d_same_lower")

    @staticmethod
    def export_lppool_2d_pads() -> None:
        """input_shape: [1, 3, 28, 28]
        output_shape: [1, 3, 30, 30]
        pad_shape: [4, 4] -> [2, 2, 2, 2] by axis
        """
        p = 3
        node = onnx.helper.make_node(
            "LpPool",
            inputs=["x"],
            outputs=["y"],
            kernel_shape=[3, 3],
            pads=[2, 2, 2, 2],
            p=p,
        )
        x = np.random.randn(1, 3, 28, 28).astype(np.float32)
        x_shape = np.shape(x)
        kernel_shape = (3, 3)
        strides = (1, 1)
        pad_bottom = pad_top = pad_right = pad_left = 2
        pads = [pad_top, pad_left, pad_bottom, pad_right]
        # extra_pads follows the same [begin_h, begin_w, end_h, end_w]
        # ordering as pads, hence the (0, 2) / (1, 3) index pairing below.
        out_shape, extra_pads = get_output_shape_explicit_padding(
            pads, x_shape[2:], kernel_shape, strides
        )
        padded = np.pad(
            x,
            (
                (0, 0),
                (0, 0),
                (extra_pads[0], extra_pads[2]),
                (extra_pads[1], extra_pads[3]),
            ),
            mode="constant",
            constant_values=0,
        )
        y = pool(
            padded,
            x_shape,
            kernel_shape,
            strides,
            out_shape,
            "LPPOOL",
            pads_required=extra_pads,
            pads=pads,
            p=p,
        )

        expect(node, inputs=[x], outputs=[y], name="test_lppool_2d_pads")

    @staticmethod
    def export_lppool_2d_strides() -> None:
        """input_shape: [1, 3, 32, 32]
        output_shape: [1, 3, 10, 10]
        """
        p = 2
        node = onnx.helper.make_node(
            "LpPool",
            inputs=["x"],
            outputs=["y"],
            kernel_shape=[5, 5],
            strides=[3, 3],
            p=p,
        )
        x = np.random.randn(1, 3, 32, 32).astype(np.float32)
        x_shape = np.shape(x)
        pads = None
        kernel_shape = (5, 5)
        strides = (3, 3)
        out_shape, _ = get_output_shape_explicit_padding(
            pads, x_shape[2:], kernel_shape, strides
        )
        padded = x
        y = pool(padded, x_shape, kernel_shape, strides, out_shape, "LPPOOL", p=p)

        expect(node, inputs=[x], outputs=[y], name="test_lppool_2d_strides")

    @staticmethod
    def export_lppool_2d_dilations() -> None:
        """input_shape: [1, 1, 4, 4]
        output_shape: [1, 1, 2, 2]
        """
        p = 2
        node = onnx.helper.make_node(
            "LpPool",
            inputs=["x"],
            outputs=["y"],
            kernel_shape=[2, 2],
            strides=[1, 1],
            dilations=[2, 2],
            p=p,
        )
        x = np.array(
            [
                [
                    [
                        [1, 2, 3, 4],
                        [5, 6, 7, 8],
                        [9, 10, 11, 12],
                        [13, 14, 15, 16],
                    ]
                ]
            ]
        ).astype(np.float32)

        # Precomputed L2-pool over dilated 2x2 windows, e.g. the (0, 0)
        # output covers elements {1, 3, 9, 11}: sqrt(1+9+81+121) = 14.5602...
        y = np.array(
            [
                [
                    [
                        [14.560219778561036, 16.24807680927192],
                        [21.633307652783937, 23.49468024894146],
                    ]
                ]
            ]
        ).astype(np.float32)

        expect(node, inputs=[x], outputs=[y], name="test_lppool_2d_dilations")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/lrn.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/lrn.py
new file mode 100644
index 0000000000000000000000000000000000000000..86288231d4a4561a9752dab3d5dda2e4b4a00959
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/lrn.py
@@ -0,0 +1,70 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import math
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class LRN(Base):
    """Test cases for the LRN (Local Response Normalization) operator."""

    @staticmethod
    def export() -> None:
        """LRN with explicit alpha/beta/bias/size attributes."""
        alpha = 0.0002
        beta = 0.5
        bias = 2.0
        nsize = 3
        node = onnx.helper.make_node(
            "LRN",
            inputs=["x"],
            outputs=["y"],
            alpha=alpha,
            beta=beta,
            bias=bias,
            size=nsize,
        )
        x = np.random.randn(5, 5, 5, 5).astype(np.float32)
        square_sum = np.zeros((5, 5, 5, 5)).astype(np.float32)
        # Sum of squares over a window of `nsize` channels centred on c,
        # clipped to the valid channel range [0, 5).
        for n, c, h, w in np.ndindex(x.shape):
            square_sum[n, c, h, w] = sum(
                x[
                    n,
                    max(0, c - math.floor((nsize - 1) / 2)) : min(
                        5, c + math.ceil((nsize - 1) / 2) + 1
                    ),
                    h,
                    w,
                ]
                ** 2
            )
        y = x / ((bias + (alpha / nsize) * square_sum) ** beta)
        expect(node, inputs=[x], outputs=[y], name="test_lrn")

    @staticmethod
    def export_default() -> None:
        """LRN relying on the operator's default alpha/beta/bias values.

        The locals below restate those defaults for the reference
        computation; the node itself only sets the required `size`.
        """
        alpha = 0.0001
        beta = 0.75
        bias = 1.0
        nsize = 3
        # Fix: pass `nsize` rather than a duplicated literal 3 so the node
        # attribute and the reference computation cannot drift apart.
        node = onnx.helper.make_node("LRN", inputs=["x"], outputs=["y"], size=nsize)
        x = np.random.randn(5, 5, 5, 5).astype(np.float32)
        square_sum = np.zeros((5, 5, 5, 5)).astype(np.float32)
        # Same clipped cross-channel sum of squares as in export().
        for n, c, h, w in np.ndindex(x.shape):
            square_sum[n, c, h, w] = sum(
                x[
                    n,
                    max(0, c - math.floor((nsize - 1) / 2)) : min(
                        5, c + math.ceil((nsize - 1) / 2) + 1
                    ),
                    h,
                    w,
                ]
                ** 2
            )
        y = x / ((bias + (alpha / nsize) * square_sum) ** beta)
        expect(node, inputs=[x], outputs=[y], name="test_lrn_default")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/lstm.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/lstm.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9bf6fed11e0c9d2a9856f6ea23088160f6ae30d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/lstm.py
@@ -0,0 +1,278 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from typing import Any
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class LSTMHelper:
    """NumPy reference implementation of a single-direction LSTM.

    Keyword arguments mirror the ONNX LSTM operator's input names; only
    num_directions == 1 is implemented.
    """

    def __init__(self, **params: Any) -> None:
        """Store LSTM inputs, substituting zero tensors for the optional
        B, P, initial_h and initial_c inputs when they are absent.

        Required keyword arguments: X, W, R.
        Raises NotImplementedError when W's first axis (num_directions)
        is not 1.
        """
        # LSTM Input Names
        X = "X"
        W = "W"
        R = "R"
        B = "B"
        H_0 = "initial_h"
        C_0 = "initial_c"
        P = "P"
        LAYOUT = "layout"
        number_of_gates = 4
        number_of_peepholes = 3

        required_inputs = [X, W, R]
        for i in required_inputs:
            assert i in params, f"Missing Required Input: {i}"

        # First axis of W is the num_directions axis.
        self.num_directions = params[W].shape[0]

        if self.num_directions == 1:
            # Drop the leading num_directions axis from every tensor
            # except X (which keeps its seq/batch layout).
            for k, v in params.items():
                if k != X:
                    params[k] = np.squeeze(v, axis=0)

            hidden_size = params[R].shape[-1]
            batch_size = params[X].shape[1]

            layout = params.get(LAYOUT, 0)
            x = params[X]
            # layout != 0: X arrives batch-major; swap to time-major for
            # the recurrence in step().
            x = x if layout == 0 else np.swapaxes(x, 0, 1)
            b = (
                params[B]
                if B in params
                else np.zeros(2 * number_of_gates * hidden_size, dtype=np.float32)
            )
            p = (
                params[P]
                if P in params
                else np.zeros(number_of_peepholes * hidden_size, dtype=np.float32)
            )
            h_0 = (
                params[H_0]
                if H_0 in params
                else np.zeros((batch_size, hidden_size), dtype=np.float32)
            )
            c_0 = (
                params[C_0]
                if C_0 in params
                else np.zeros((batch_size, hidden_size), dtype=np.float32)
            )

            self.X = x
            self.W = params[W]
            self.R = params[R]
            self.B = b
            self.P = p
            self.H_0 = h_0
            self.C_0 = c_0
            self.LAYOUT = layout

        else:
            raise NotImplementedError()

    def f(self, x: np.ndarray) -> np.ndarray:
        """Gate activation: logistic sigmoid."""
        return 1 / (1 + np.exp(-x))

    def g(self, x: np.ndarray) -> np.ndarray:
        """Cell input activation: tanh."""
        return np.tanh(x)

    def h(self, x: np.ndarray) -> np.ndarray:
        """Cell output activation: tanh."""
        return np.tanh(x)

    def step(self) -> tuple[np.ndarray, np.ndarray]:
        """Run the LSTM recurrence over the whole sequence.

        Returns (Y, Y_h): the hidden state at every time step and the
        final hidden state, arranged according to self.LAYOUT.
        """
        seq_length = self.X.shape[0]
        hidden_size = self.H_0.shape[-1]
        batch_size = self.X.shape[1]

        Y = np.empty([seq_length, self.num_directions, batch_size, hidden_size])
        h_list = []

        # Peephole weights for the input, output and forget gates.
        [p_i, p_o, p_f] = np.split(self.P, 3)
        H_t = self.H_0
        C_t = self.C_0
        for x in np.split(self.X, self.X.shape[0], axis=0):
            # B holds the input-weight and recurrence-weight biases
            # concatenated; add their two halves together.
            gates = (
                np.dot(x, np.transpose(self.W))
                + np.dot(H_t, np.transpose(self.R))
                + np.add(*np.split(self.B, 2))
            )
            # Gate pre-activations are laid out i, o, f, c on the last axis.
            i, o, f, c = np.split(gates, 4, -1)
            # Input/forget gates peek at the previous cell state; the
            # output gate peeks at the freshly updated cell state C.
            i = self.f(i + p_i * C_t)
            f = self.f(f + p_f * C_t)
            c = self.g(c)
            C = f * C_t + i * c
            o = self.f(o + p_o * C)
            H = o * self.h(C)
            h_list.append(H)
            H_t = H
            C_t = C

        concatenated = np.concatenate(h_list)
        if self.num_directions == 1:
            Y[:, 0, :, :] = concatenated

        if self.LAYOUT == 0:
            Y_h = Y[-1]
        else:
            # Batch-major output: [batch, seq, num_directions, hidden].
            Y = np.transpose(Y, [2, 0, 1, 3])
            Y_h = Y[:, :, -1, :]

        return Y, Y_h
+
+
class LSTM(Base):
    """Test cases for the LSTM operator, using LSTMHelper as the
    reference implementation."""

    @staticmethod
    def export_defaults() -> None:
        """LSTM with only the required X, W, R inputs."""
        input = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]).astype(np.float32)

        input_size = 2
        hidden_size = 3
        weight_scale = 0.1
        number_of_gates = 4

        node = onnx.helper.make_node(
            "LSTM", inputs=["X", "W", "R"], outputs=["", "Y_h"], hidden_size=hidden_size
        )

        # Constant weights so the expected output is reproducible.
        W = weight_scale * np.ones(
            (1, number_of_gates * hidden_size, input_size)
        ).astype(np.float32)
        R = weight_scale * np.ones(
            (1, number_of_gates * hidden_size, hidden_size)
        ).astype(np.float32)

        lstm = LSTMHelper(X=input, W=W, R=R)
        _, Y_h = lstm.step()
        expect(
            node,
            inputs=[input, W, R],
            outputs=[Y_h.astype(np.float32)],
            name="test_lstm_defaults",
        )

    @staticmethod
    def export_initial_bias() -> None:
        """LSTM with a non-zero bias on the input weights (B input)."""
        input = np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]).astype(
            np.float32
        )

        input_size = 3
        hidden_size = 4
        weight_scale = 0.1
        custom_bias = 0.1
        number_of_gates = 4

        node = onnx.helper.make_node(
            "LSTM",
            inputs=["X", "W", "R", "B"],
            outputs=["", "Y_h"],
            hidden_size=hidden_size,
        )

        W = weight_scale * np.ones(
            (1, number_of_gates * hidden_size, input_size)
        ).astype(np.float32)
        R = weight_scale * np.ones(
            (1, number_of_gates * hidden_size, hidden_size)
        ).astype(np.float32)

        # Adding custom bias
        # B concatenates the input-weight bias (W_B) and the recurrence
        # bias (R_B, zero here) along the last axis.
        W_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(
            np.float32
        )
        R_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)
        B = np.concatenate((W_B, R_B), 1)

        lstm = LSTMHelper(X=input, W=W, R=R, B=B)
        _, Y_h = lstm.step()
        expect(
            node,
            inputs=[input, W, R, B],
            outputs=[Y_h.astype(np.float32)],
            name="test_lstm_with_initial_bias",
        )

    @staticmethod
    def export_peepholes() -> None:
        """LSTM with peephole weights P and all optional inputs supplied."""
        input = np.array([[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]]).astype(
            np.float32
        )

        input_size = 4
        hidden_size = 3
        weight_scale = 0.1
        number_of_gates = 4
        number_of_peepholes = 3

        node = onnx.helper.make_node(
            "LSTM",
            inputs=["X", "W", "R", "B", "sequence_lens", "initial_h", "initial_c", "P"],
            outputs=["", "Y_h"],
            hidden_size=hidden_size,
        )

        # Initializing Inputs
        W = weight_scale * np.ones(
            (1, number_of_gates * hidden_size, input_size)
        ).astype(np.float32)
        R = weight_scale * np.ones(
            (1, number_of_gates * hidden_size, hidden_size)
        ).astype(np.float32)
        B = np.zeros((1, 2 * number_of_gates * hidden_size)).astype(np.float32)
        # Every batch entry uses the full sequence length.
        seq_lens = np.repeat(input.shape[0], input.shape[1]).astype(np.int32)
        init_h = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32)
        init_c = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32)
        P = weight_scale * np.ones((1, number_of_peepholes * hidden_size)).astype(
            np.float32
        )

        lstm = LSTMHelper(
            X=input, W=W, R=R, B=B, P=P, initial_c=init_c, initial_h=init_h
        )
        _, Y_h = lstm.step()
        expect(
            node,
            inputs=[input, W, R, B, seq_lens, init_h, init_c, P],
            outputs=[Y_h.astype(np.float32)],
            name="test_lstm_with_peepholes",
        )

    @staticmethod
    def export_batchwise() -> None:
        """LSTM with layout=1 (batch-major X and outputs)."""
        input = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]]).astype(np.float32)

        input_size = 2
        hidden_size = 7
        weight_scale = 0.3
        number_of_gates = 4
        # layout=1: LSTMHelper swaps X to time-major internally and
        # transposes Y/Y_h back to batch-major.
        layout = 1

        node = onnx.helper.make_node(
            "LSTM",
            inputs=["X", "W", "R"],
            outputs=["Y", "Y_h"],
            hidden_size=hidden_size,
            layout=layout,
        )

        W = weight_scale * np.ones(
            (1, number_of_gates * hidden_size, input_size)
        ).astype(np.float32)
        R = weight_scale * np.ones(
            (1, number_of_gates * hidden_size, hidden_size)
        ).astype(np.float32)

        lstm = LSTMHelper(X=input, W=W, R=R, layout=layout)
        Y, Y_h = lstm.step()
        expect(
            node,
            inputs=[input, W, R],
            outputs=[Y.astype(np.float32), Y_h.astype(np.float32)],
            name="test_lstm_batchwise",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/matmul.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/matmul.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5be47374f227d4120cff279024dbf5a0a289e34
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/matmul.py
@@ -0,0 +1,38 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class MatMul(Base):
    """Test cases for the MatMul operator at several tensor ranks."""

    @staticmethod
    def export() -> None:
        """Check 2-D, 3-D and 4-D products against numpy.matmul."""
        node = onnx.helper.make_node(
            "MatMul",
            inputs=["a", "b"],
            outputs=["c"],
        )

        # One case per rank; the trailing two dims are always (3, 4) x (4, 3).
        cases = {
            "test_matmul_2d": ((3, 4), (4, 3)),
            "test_matmul_3d": ((2, 3, 4), (2, 4, 3)),
            "test_matmul_4d": ((1, 2, 3, 4), (1, 2, 4, 3)),
        }
        for name, (shape_a, shape_b) in cases.items():
            lhs = np.random.randn(*shape_a).astype(np.float32)
            rhs = np.random.randn(*shape_b).astype(np.float32)
            expect(node, inputs=[lhs, rhs], outputs=[np.matmul(lhs, rhs)], name=name)
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/matmulinteger.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/matmulinteger.py
new file mode 100644
index 0000000000000000000000000000000000000000..684a2f1f6f9c3f35fd44ecef22b04e396f35f40e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/matmulinteger.py
@@ -0,0 +1,60 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class MatMulInteger(Base):
    """Test case for MatMulInteger with per-tensor zero points."""

    @staticmethod
    def export() -> None:
        """Integer matmul: Y = (A - a_zero_point) @ (B - b_zero_point)."""
        node = onnx.helper.make_node(
            "MatMulInteger",
            inputs=["A", "B", "a_zero_point", "b_zero_point"],
            outputs=["Y"],
        )

        A = np.array(
            [[11, 7, 3], [10, 6, 2], [9, 5, 1], [8, 4, 0]],
            dtype=np.uint8,
        )
        B = np.array(
            [[1, 4], [2, 5], [3, 6]],
            dtype=np.uint8,
        )
        a_zero_point = np.array([12], dtype=np.uint8)
        b_zero_point = np.array([0], dtype=np.uint8)

        # Precomputed int32 result of (A - 12) @ (B - 0).
        output = np.array(
            [[-38, -83], [-44, -98], [-50, -113], [-56, -128]],
            dtype=np.int32,
        )

        expect(
            node,
            inputs=[A, B, a_zero_point, b_zero_point],
            outputs=[output],
            name="test_matmulinteger",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/max.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/max.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f2c9f122899154396afeef6f23c348bc6e7a7df
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/max.py
@@ -0,0 +1,66 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.backend.test.case.utils import all_numeric_dtypes
+
+
class Max(Base):
    """Test cases for the variadic element-wise Max operator."""

    @staticmethod
    def export() -> None:
        """Three-input, single-input and two-input Max examples."""
        data_0 = np.array([3, 2, 1]).astype(np.float32)
        data_1 = np.array([1, 4, 4]).astype(np.float32)
        data_2 = np.array([2, 5, 3]).astype(np.float32)

        # Element-wise maximum across three inputs.
        node = onnx.helper.make_node(
            "Max",
            inputs=["data_0", "data_1", "data_2"],
            outputs=["result"],
        )
        expect(
            node,
            inputs=[data_0, data_1, data_2],
            outputs=[np.array([3, 5, 4]).astype(np.float32)],
            name="test_max_example",
        )

        # A single input passes through unchanged.
        node = onnx.helper.make_node(
            "Max",
            inputs=["data_0"],
            outputs=["result"],
        )
        expect(node, inputs=[data_0], outputs=[data_0], name="test_max_one_input")

        # Two inputs: expected output comes straight from numpy.maximum.
        node = onnx.helper.make_node(
            "Max",
            inputs=["data_0", "data_1"],
            outputs=["result"],
        )
        expect(
            node,
            inputs=[data_0, data_1],
            outputs=[np.maximum(data_0, data_1)],
            name="test_max_two_inputs",
        )

    @staticmethod
    def export_max_all_numeric_types() -> None:
        """Run the two-input example once per supported numeric dtype."""
        node = onnx.helper.make_node(
            "Max",
            inputs=["data_0", "data_1"],
            outputs=["result"],
        )
        for op_dtype in all_numeric_dtypes:
            expect(
                node,
                inputs=[
                    np.array([3, 2, 1]).astype(op_dtype),
                    np.array([1, 4, 4]).astype(op_dtype),
                ],
                outputs=[np.array([3, 4, 4]).astype(op_dtype)],
                name=f"test_max_{np.dtype(op_dtype).name}",
            )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/maxpool.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/maxpool.py
new file mode 100644
index 0000000000000000000000000000000000000000..aeedf72c5680d360791867b0de30f91fa8e58271
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/maxpool.py
@@ -0,0 +1,725 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.reference.ops.op_pool_common import (
+    get_output_shape_auto_pad,
+    get_output_shape_explicit_padding,
+    get_pad_shape,
+    pool,
+)
+
+
+class MaxPool(Base):
+    @staticmethod
+    def export_maxpool_2d_uint8() -> None:
+        """input_shape: [1, 1, 5, 5]
+        output_shape: [1, 1, 5, 5]
+        pad_shape: [4, 4] -> [2, 2, 2, 2] by axis
+        """
+        node = onnx.helper.make_node(
+            "MaxPool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[5, 5],
+            pads=[2, 2, 2, 2],
+        )
+        x = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4, 5],
+                        [6, 7, 8, 9, 10],
+                        [11, 12, 13, 14, 15],
+                        [16, 17, 18, 19, 20],
+                        [21, 22, 23, 24, 25],
+                    ]
+                ]
+            ]
+        ).astype(np.uint8)
+        y = np.array(
+            [
+                [
+                    [
+                        [13, 14, 15, 15, 15],
+                        [18, 19, 20, 20, 20],
+                        [23, 24, 25, 25, 25],
+                        [23, 24, 25, 25, 25],
+                        [23, 24, 25, 25, 25],
+                    ]
+                ]
+            ]
+        ).astype(np.uint8)
+
+        expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_uint8")
+
    @staticmethod
    def export_maxpool_2d_precomputed_pads() -> None:
        """input_shape: [1, 1, 5, 5]
        output_shape: [1, 1, 5, 5]
        pad_shape: [4, 4] -> [2, 2, 2, 2] by axis
        """
        node = onnx.helper.make_node(
            "MaxPool",
            inputs=["x"],
            outputs=["y"],
            kernel_shape=[5, 5],
            pads=[2, 2, 2, 2],
        )
        # 5x5 input counting 1..25 row-major.
        x = np.array(
            [
                [
                    [
                        [1, 2, 3, 4, 5],
                        [6, 7, 8, 9, 10],
                        [11, 12, 13, 14, 15],
                        [16, 17, 18, 19, 20],
                        [21, 22, 23, 24, 25],
                    ]
                ]
            ]
        ).astype(np.float32)
        # Precomputed maxima for a 5x5 kernel with symmetric padding of 2.
        y = np.array(
            [
                [
                    [
                        [13, 14, 15, 15, 15],
                        [18, 19, 20, 20, 20],
                        [23, 24, 25, 25, 25],
                        [23, 24, 25, 25, 25],
                        [23, 24, 25, 25, 25],
                    ]
                ]
            ]
        ).astype(np.float32)

        expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_precomputed_pads")
+
    @staticmethod
    def export_maxpool_with_argmax_2d_precomputed_pads() -> None:
        """input_shape: [1, 1, 5, 5]
        output_shape: [1, 1, 5, 5]
        pad_shape: [4, 4] -> [2, 2, 2, 2] by axis
        """
        node = onnx.helper.make_node(
            "MaxPool",
            inputs=["x"],
            outputs=["y", "z"],
            kernel_shape=[5, 5],
            pads=[2, 2, 2, 2],
        )
        # 5x5 input counting 1..25 row-major.
        x = np.array(
            [
                [
                    [
                        [1, 2, 3, 4, 5],
                        [6, 7, 8, 9, 10],
                        [11, 12, 13, 14, 15],
                        [16, 17, 18, 19, 20],
                        [21, 22, 23, 24, 25],
                    ]
                ]
            ]
        ).astype(np.float32)
        y = np.array(
            [
                [
                    [
                        [13, 14, 15, 15, 15],
                        [18, 19, 20, 20, 20],
                        [23, 24, 25, 25, 25],
                        [23, 24, 25, 25, 25],
                        [23, 24, 25, 25, 25],
                    ]
                ]
            ]
        ).astype(np.float32)
        # z holds the flattened row-major index of each maximum in the
        # unpadded input, e.g. 12 is the value 13 at position (2, 2).
        z = np.array(
            [
                [
                    [
                        [12, 13, 14, 14, 14],
                        [17, 18, 19, 19, 19],
                        [22, 23, 24, 24, 24],
                        [22, 23, 24, 24, 24],
                        [22, 23, 24, 24, 24],
                    ]
                ]
            ]
        ).astype(np.int64)

        expect(
            node,
            inputs=[x],
            outputs=[y, z],
            name="test_maxpool_with_argmax_2d_precomputed_pads",
        )
+
+    @staticmethod
+    def export_maxpool_2d_precomputed_strides() -> None:
+        """input_shape: [1, 1, 5, 5]
+        output_shape: [1, 1, 2, 2]
+        """
+        # 2x2 kernel with stride 2 over a 5x5 input; expected output is
+        # hand-computed (hence "precomputed" in the test name).
+        node = onnx.helper.make_node(
+            "MaxPool", inputs=["x"], outputs=["y"], kernel_shape=[2, 2], strides=[2, 2]
+        )
+        x = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4, 5],
+                        [6, 7, 8, 9, 10],
+                        [11, 12, 13, 14, 15],
+                        [16, 17, 18, 19, 20],
+                        [21, 22, 23, 24, 25],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+        y = np.array([[[[7, 9], [17, 19]]]]).astype(np.float32)
+
+        expect(
+            node, inputs=[x], outputs=[y], name="test_maxpool_2d_precomputed_strides"
+        )
+
+    @staticmethod
+    def export_maxpool_with_argmax_2d_precomputed_strides() -> None:
+        """input_shape: [1, 1, 5, 5]
+        output_shape: [1, 1, 2, 2]
+        """
+        # storage_order=1 requests column-major argmax indices in "z"
+        # (note z = [[6, 16], [8, 18]] rather than row-major values).
+        node = onnx.helper.make_node(
+            "MaxPool",
+            inputs=["x"],
+            outputs=["y", "z"],
+            kernel_shape=[2, 2],
+            strides=[2, 2],
+            storage_order=1,
+        )
+        x = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4, 5],
+                        [6, 7, 8, 9, 10],
+                        [11, 12, 13, 14, 15],
+                        [16, 17, 18, 19, 20],
+                        [21, 22, 23, 24, 25],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+        y = np.array([[[[7, 9], [17, 19]]]]).astype(np.float32)
+        z = np.array([[[[6, 16], [8, 18]]]]).astype(np.int64)
+
+        expect(
+            node,
+            inputs=[x],
+            outputs=[y, z],
+            name="test_maxpool_with_argmax_2d_precomputed_strides",
+        )
+
+    @staticmethod
+    def export_maxpool_2d_precomputed_same_upper() -> None:
+        """input_shape: [1, 1, 5, 5]
+        output_shape: [1, 1, 3, 3]
+        pad_shape: [2, 2] -> [1, 1, 1, 1] by axis
+        """
+        # auto_pad=SAME_UPPER: padding is derived by the op so that
+        # out_size = ceil(in_size / stride); expected y is hand-computed.
+        node = onnx.helper.make_node(
+            "MaxPool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[3, 3],
+            strides=[2, 2],
+            auto_pad="SAME_UPPER",
+        )
+        x = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4, 5],
+                        [6, 7, 8, 9, 10],
+                        [11, 12, 13, 14, 15],
+                        [16, 17, 18, 19, 20],
+                        [21, 22, 23, 24, 25],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+        y = np.array([[[[7, 9, 10], [17, 19, 20], [22, 24, 25]]]]).astype(np.float32)
+
+        expect(
+            node, inputs=[x], outputs=[y], name="test_maxpool_2d_precomputed_same_upper"
+        )
+
+    @staticmethod
+    def export_maxpool_1d_default() -> None:
+        """input_shape: [1, 3, 32]
+        output_shape: [1, 3, 31]
+        """
+        node = onnx.helper.make_node(
+            "MaxPool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[2],
+        )
+        # Random input; the expected output is produced by the reference
+        # helpers (get_output_shape_explicit_padding + pool) with no padding.
+        x = np.random.randn(1, 3, 32).astype(np.float32)
+        x_shape = np.shape(x)
+        pads = None
+        kernel_shape = [2]
+        strides = [1]
+        out_shape, _ = get_output_shape_explicit_padding(
+            pads, x_shape[2:], kernel_shape, strides
+        )
+        padded = x
+        y = pool(padded, x_shape, kernel_shape, strides, out_shape, "MAX")
+
+        expect(node, inputs=[x], outputs=[y], name="test_maxpool_1d_default")
+
+    @staticmethod
+    def export_maxpool_2d_default() -> None:
+        """input_shape: [1, 3, 32, 32]
+        output_shape: [1, 3, 31, 31]
+        """
+        node = onnx.helper.make_node(
+            "MaxPool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[2, 2],
+        )
+        # Random input; reference output computed via the helper `pool`
+        # with default (no) padding and unit strides.
+        x = np.random.randn(1, 3, 32, 32).astype(np.float32)
+        x_shape = np.shape(x)
+        pads = None
+        kernel_shape = (2, 2)
+        strides = (1, 1)
+        out_shape, _ = get_output_shape_explicit_padding(
+            pads, x_shape[2:], kernel_shape, strides
+        )
+        padded = x
+        y = pool(padded, x_shape, kernel_shape, strides, out_shape, "MAX")
+
+        expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_default")
+
+    @staticmethod
+    def export_maxpool_3d_default() -> None:
+        """input_shape: [1, 3, 32, 32, 32]
+        output_shape: [1, 3, 31, 31, 31]
+        """
+        node = onnx.helper.make_node(
+            "MaxPool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[2, 2, 2],
+        )
+        # 3-D variant of the default test: random input, reference output
+        # from the helper `pool` with no padding and unit strides.
+        x = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)
+        x_shape = np.shape(x)
+        pads = None
+        kernel_shape = [2, 2, 2]
+        strides = [1, 1, 1]
+        out_shape, _ = get_output_shape_explicit_padding(
+            pads, x_shape[2:], kernel_shape, strides
+        )
+        padded = x
+        y = pool(padded, x_shape, kernel_shape, strides, out_shape, "MAX")
+
+        expect(node, inputs=[x], outputs=[y], name="test_maxpool_3d_default")
+
+    @staticmethod
+    def export_maxpool_2d_same_upper() -> None:
+        """input_shape: [1, 3, 32, 32]
+        output_shape: [1, 3, 32, 32]
+        pad_shape: [1, 1] -> [0, 1, 0, 1] by axis
+        """
+        node = onnx.helper.make_node(
+            "MaxPool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[2, 2],
+            auto_pad="SAME_UPPER",
+        )
+        x = np.random.randn(1, 3, 32, 32).astype(np.float32)
+        x_shape = np.shape(x)
+        kernel_shape = (2, 2)
+        strides = (1, 1)
+        out_shape = get_output_shape_auto_pad(
+            "SAME_UPPER", x_shape[2:], kernel_shape, strides
+        )
+        pad_shape = get_pad_shape(
+            "SAME_UPPER", x_shape[2:], kernel_shape, strides, out_shape
+        )
+        # SAME_UPPER: any odd padding goes to the end (bottom/right).
+        pad_top = pad_shape[0] // 2
+        pad_bottom = pad_shape[0] - pad_top
+        pad_left = pad_shape[1] // 2
+        pad_right = pad_shape[1] - pad_left
+        # Pad with NaN so padded cells can never win the max in `pool`.
+        padded = np.pad(
+            x,
+            ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),
+            mode="constant",
+            constant_values=np.nan,
+        )
+        pads = [pad_top, pad_left, pad_bottom, pad_right]
+        y = pool(padded, x_shape, kernel_shape, strides, out_shape, "MAX", pads, pads)
+
+        expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_same_upper")
+
+    @staticmethod
+    def export_maxpool_2d_same_lower() -> None:
+        """input_shape: [1, 3, 32, 32]
+        output_shape: [1, 3, 32, 32]
+        pad_shape: [1, 1] -> [1, 0, 1, 0] by axis
+        """
+        node = onnx.helper.make_node(
+            "MaxPool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[2, 2],
+            auto_pad="SAME_LOWER",
+        )
+        x = np.random.randn(1, 3, 32, 32).astype(np.float32)
+        x_shape = np.shape(x)
+        kernel_shape = (2, 2)
+        strides = (1, 1)
+        out_shape = get_output_shape_auto_pad(
+            "SAME_LOWER", x_shape[2:], kernel_shape, strides
+        )
+        pad_shape = get_pad_shape(
+            "SAME_LOWER", x_shape[2:], kernel_shape, strides, out_shape
+        )
+        # SAME_LOWER: any odd padding goes to the start (top/left) —
+        # note the mirrored split compared to the SAME_UPPER test above.
+        pad_bottom = pad_shape[0] // 2
+        pad_top = pad_shape[0] - pad_bottom
+        pad_right = pad_shape[1] // 2
+        pad_left = pad_shape[1] - pad_right
+        # Pad with NaN so padded cells can never win the max in `pool`.
+        padded = np.pad(
+            x,
+            ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),
+            mode="constant",
+            constant_values=np.nan,
+        )
+        pads = [pad_top, pad_left, pad_bottom, pad_right]
+        y = pool(padded, x_shape, kernel_shape, strides, out_shape, "MAX", pads, pads)
+
+        expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_same_lower")
+
+    @staticmethod
+    def export_maxpool_2d_pads() -> None:
+        """input_shape: [1, 3, 28, 28]
+        output_shape: [1, 3, 30, 30]
+        pad_shape: [4, 4] -> [2, 2, 2, 2] by axis
+        """
+        node = onnx.helper.make_node(
+            "MaxPool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[3, 3],
+            pads=[2, 2, 2, 2],
+        )
+        x = np.random.randn(1, 3, 28, 28).astype(np.float32)
+        x_shape = np.shape(x)
+        kernel_shape = (3, 3)
+        strides = (1, 1)
+        # Explicit symmetric padding of 2 on every edge.
+        pad_bottom = pad_top = pad_right = pad_left = 2
+        pads = [pad_top, pad_left, pad_bottom, pad_right]
+        out_shape, extra_pads = get_output_shape_explicit_padding(
+            pads, x_shape[2:], kernel_shape, strides
+        )
+        # Pad with NaN so padded cells can never win the max in `pool`.
+        padded = np.pad(
+            x,
+            ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),
+            mode="constant",
+            constant_values=np.nan,
+        )
+
+        y = pool(
+            padded,
+            x_shape,
+            kernel_shape,
+            strides,
+            out_shape,
+            "MAX",
+            pads_required=extra_pads,
+            pads=pads,
+        )
+
+        expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_pads")
+
+    @staticmethod
+    def export_maxpool_2d_strides() -> None:
+        """input_shape: [1, 3, 32, 32]
+        output_shape: [1, 3, 10, 10]
+        """
+        node = onnx.helper.make_node(
+            "MaxPool", inputs=["x"], outputs=["y"], kernel_shape=[5, 5], strides=[3, 3]
+        )
+        # Random input; reference output from `pool` with stride 3 and
+        # no explicit padding.
+        x = np.random.randn(1, 3, 32, 32).astype(np.float32)
+        x_shape = np.shape(x)
+        pads = None
+        kernel_shape = (5, 5)
+        strides = (3, 3)
+        out_shape, pads = get_output_shape_explicit_padding(
+            pads, x_shape[2:], kernel_shape, strides
+        )
+        padded = x
+        y = pool(padded, x_shape, kernel_shape, strides, out_shape, "MAX")
+
+        expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_strides")
+
+    @staticmethod
+    def export_maxpool_2d_ceil() -> None:
+        """input_shape: [1, 1, 4, 4]
+        output_shape: [1, 1, 2, 2]
+        """
+        # ceil_mode=True rounds the output size up, so a 3x3 kernel with
+        # stride 2 over a 4x4 input yields 2x2 (floor mode would give 1x1).
+        node = onnx.helper.make_node(
+            "MaxPool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[3, 3],
+            strides=[2, 2],
+            ceil_mode=True,
+        )
+        x = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+        y = np.array([[[[11, 12], [15, 16]]]]).astype(np.float32)
+
+        expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_ceil")
+
+    @staticmethod
+    def export_maxpool_2d_ceil_output_size_reduce_by_one() -> None:
+        """input_shape: [1, 1, 2, 2]
+        output_shape: [1, 1, 1, 1]
+        """
+        # Edge case: with ceil_mode=True a window must not start in the
+        # padding, so the output stays 1x1 here instead of growing to 2x2.
+        node = onnx.helper.make_node(
+            "MaxPool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[1, 1],
+            strides=[2, 2],
+            ceil_mode=True,
+        )
+        x = np.array([[[[1, 2], [3, 4]]]]).astype(np.float32)
+        y = np.array([[[[1]]]]).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[x],
+            outputs=[y],
+            name="test_maxpool_2d_ceil_output_size_reduce_by_one",
+        )
+
+    @staticmethod
+    def export_maxpool_2d_dilations() -> None:
+        """input_shape: [1, 1, 4, 4]
+        output_shape: [1, 1, 2, 2]
+        """
+        # dilations=[2, 2]: each 2x2 window samples elements 2 apart, so
+        # its effective receptive field is 3x3.
+        node = onnx.helper.make_node(
+            "MaxPool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[2, 2],
+            strides=[1, 1],
+            dilations=[2, 2],
+        )
+        x = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+        y = np.array([[[[11, 12], [15, 16]]]]).astype(np.float32)
+
+        expect(node, inputs=[x], outputs=[y], name="test_maxpool_2d_dilations")
+
+    @staticmethod
+    def export_maxpool_3d_dilations() -> None:
+        """input_shape: [1, 1, 4, 4, 4]
+        output_shape: [1, 1, 2, 2, 2]
+        """
+        # 3-D dilated max pool; the input repeats the same 4x4 slice along
+        # the depth axis, so the output repeats along depth as well.
+        node = onnx.helper.make_node(
+            "MaxPool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[2, 2, 2],
+            strides=[1, 1, 1],
+            dilations=[2, 2, 2],
+        )
+        x = np.array(
+            [
+                [
+                    [
+                        [
+                            [1, 2, 3, 4],
+                            [5, 6, 7, 8],
+                            [9, 10, 11, 12],
+                            [13, 14, 15, 16],
+                        ],
+                        [
+                            [1, 2, 3, 4],
+                            [5, 6, 7, 8],
+                            [9, 10, 11, 12],
+                            [13, 14, 15, 16],
+                        ],
+                        [
+                            [1, 2, 3, 4],
+                            [5, 6, 7, 8],
+                            [9, 10, 11, 12],
+                            [13, 14, 15, 16],
+                        ],
+                        [
+                            [1, 2, 3, 4],
+                            [5, 6, 7, 8],
+                            [9, 10, 11, 12],
+                            [13, 14, 15, 16],
+                        ],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+        y = np.array([[[[[11, 12], [15, 16]], [[11, 12], [15, 16]]]]]).astype(
+            np.float32
+        )
+
+        expect(node, inputs=[x], outputs=[y], name="test_maxpool_3d_dilations")
+
+    @staticmethod
+    def export_maxpool_3d_dilations_use_ref_impl() -> None:
+        """input_shape: [1, 1, 4, 4, 4]
+        output_shape: [1, 1, 2, 2, 2]
+        """
+        # Same case as export_maxpool_3d_dilations, but the expected output
+        # comes from the reference `pool` helper rather than being hard-coded.
+        dilations = [2, 2, 2]
+        kernel_shape = [2, 2, 2]
+        strides = [1, 1, 1]
+        ceil_mode = False
+        node = onnx.helper.make_node(
+            "MaxPool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=[2, 2, 2],
+            strides=[1, 1, 1],
+            dilations=dilations,
+        )
+        x = np.array(
+            [
+                [
+                    [
+                        [
+                            [1, 2, 3, 4],
+                            [5, 6, 7, 8],
+                            [9, 10, 11, 12],
+                            [13, 14, 15, 16],
+                        ],
+                        [
+                            [1, 2, 3, 4],
+                            [5, 6, 7, 8],
+                            [9, 10, 11, 12],
+                            [13, 14, 15, 16],
+                        ],
+                        [
+                            [1, 2, 3, 4],
+                            [5, 6, 7, 8],
+                            [9, 10, 11, 12],
+                            [13, 14, 15, 16],
+                        ],
+                        [
+                            [1, 2, 3, 4],
+                            [5, 6, 7, 8],
+                            [9, 10, 11, 12],
+                            [13, 14, 15, 16],
+                        ],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+
+        # Spatial dims only (N and C stripped off).
+        x_shape = x.shape[2:]
+        out_shape, pads = get_output_shape_explicit_padding(
+            None, x_shape, kernel_shape, strides, dilations, ceil_mode=ceil_mode
+        )
+        padded = x
+        y = pool(
+            padded,
+            (1, 1, *x_shape),
+            kernel_shape,
+            strides,
+            out_shape,
+            "MAX",
+            pads_required=pads,
+            pads=None,
+            dilations=dilations,
+        )
+
+        expect(
+            node, inputs=[x], outputs=[y], name="test_maxpool_3d_dilations_use_ref_impl"
+        )
+
+    @staticmethod
+    def export_maxpool_3d_dilations_use_ref_impl_large() -> None:
+        """input_shape: [1, 1, 32, 32, 32]
+        Large randomized 3-D case combining dilations, non-unit strides and
+        ceil_mode; expected output from the reference `pool` helper.
+        """
+        x_shape = (32, 32, 32)
+        dilations = (2, 2, 2)
+        kernel_shape = (5, 5, 5)
+        strides = (3, 3, 3)
+        ceil_mode = True
+
+        node = onnx.helper.make_node(
+            "MaxPool",
+            inputs=["x"],
+            outputs=["y"],
+            kernel_shape=kernel_shape,
+            strides=strides,
+            dilations=dilations,
+            ceil_mode=ceil_mode,
+        )
+
+        x = np.random.randn(1, 1, *x_shape).astype(np.float32)
+        out_shape, pads = get_output_shape_explicit_padding(
+            None, x_shape, kernel_shape, strides, dilations, ceil_mode=ceil_mode
+        )
+        # pads is laid out [d_begin, h_begin, w_begin, d_end, h_end, w_end].
+        # NOTE(review): unlike the SAME_* tests above this pads with 0, not
+        # NaN — presumably acceptable for randn data; confirm against `pool`.
+        padded = np.pad(
+            x,
+            (
+                (0, 0),
+                (0, 0),
+                (pads[0], pads[3]),
+                (pads[1], pads[4]),
+                (pads[2], pads[5]),
+            ),
+            mode="constant",
+            constant_values=0,
+        )
+        y = pool(
+            padded,
+            (1, 1, *x_shape),
+            kernel_shape,
+            strides,
+            out_shape,
+            "MAX",
+            pads_required=pads,
+            pads=None,
+            dilations=dilations,
+        )
+
+        expect(
+            node,
+            inputs=[x],
+            outputs=[y],
+            name="test_maxpool_3d_dilations_use_ref_impl_large",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/maxunpool.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/maxunpool.py
new file mode 100644
index 0000000000000000000000000000000000000000..65643189a30ec7fea1183cf58fb1b04f78f32d4a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/maxunpool.py
@@ -0,0 +1,67 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class MaxUnpool(Base):
+    """Test-case generators for the ONNX MaxUnpool operator."""
+
+    @staticmethod
+    def export_without_output_shape() -> None:
+        # Scatter the pooled values xT back to the flat positions given by
+        # the index tensor xI; everything else in y is zero.
+        node = onnx.helper.make_node(
+            "MaxUnpool",
+            inputs=["xT", "xI"],
+            outputs=["y"],
+            kernel_shape=[2, 2],
+            strides=[2, 2],
+        )
+        xT = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)
+        xI = np.array([[[[5, 7], [13, 15]]]], dtype=np.int64)
+        y = np.array(
+            [[[[0, 0, 0, 0], [0, 1, 0, 2], [0, 0, 0, 0], [0, 3, 0, 4]]]],
+            dtype=np.float32,
+        )
+        expect(
+            node,
+            inputs=[xT, xI],
+            outputs=[y],
+            name="test_maxunpool_export_without_output_shape",
+        )
+
+    @staticmethod
+    def export_with_output_shape() -> None:
+        # Same scatter, but the optional third input pins the output shape
+        # to (1, 1, 5, 5) instead of the shape inferred from the attributes.
+        node = onnx.helper.make_node(
+            "MaxUnpool",
+            inputs=["xT", "xI", "output_shape"],
+            outputs=["y"],
+            kernel_shape=[2, 2],
+            strides=[2, 2],
+        )
+        xT = np.array([[[[5, 6], [7, 8]]]], dtype=np.float32)
+        xI = np.array([[[[5, 7], [13, 15]]]], dtype=np.int64)
+        output_shape = np.array((1, 1, 5, 5), dtype=np.int64)
+        y = np.array(
+            [
+                [
+                    [
+                        [0, 0, 0, 0, 0],
+                        [0, 5, 0, 6, 0],
+                        [0, 0, 0, 0, 0],
+                        [0, 7, 0, 8, 0],
+                        [0, 0, 0, 0, 0],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+        expect(
+            node,
+            inputs=[xT, xI, output_shape],
+            outputs=[y],
+            name="test_maxunpool_export_with_output_shape",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/mean.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/mean.py
new file mode 100644
index 0000000000000000000000000000000000000000..25061bad7e88276c7c869264fc78d9546bf75a15
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/mean.py
@@ -0,0 +1,47 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Mean(Base):
+    """Test-case generators for the ONNX Mean operator (element-wise
+    mean over a variadic number of inputs)."""
+
+    @staticmethod
+    def export() -> None:
+        # Three inputs: result is the element-wise mean.
+        data_0 = np.array([3, 0, 2]).astype(np.float32)
+        data_1 = np.array([1, 3, 4]).astype(np.float32)
+        data_2 = np.array([2, 6, 6]).astype(np.float32)
+        result = np.array([2, 3, 4]).astype(np.float32)
+        node = onnx.helper.make_node(
+            "Mean",
+            inputs=["data_0", "data_1", "data_2"],
+            outputs=["result"],
+        )
+        expect(
+            node,
+            inputs=[data_0, data_1, data_2],
+            outputs=[result],
+            name="test_mean_example",
+        )
+
+        # Single input: the mean is the input itself (output reuses data_0).
+        node = onnx.helper.make_node(
+            "Mean",
+            inputs=["data_0"],
+            outputs=["result"],
+        )
+        expect(node, inputs=[data_0], outputs=[data_0], name="test_mean_one_input")
+
+        # Two inputs: mean computed with numpy as (a + b) / 2.
+        result = np.divide(np.add(data_0, data_1), 2.0)
+        node = onnx.helper.make_node(
+            "Mean",
+            inputs=["data_0", "data_1"],
+            outputs=["result"],
+        )
+        expect(
+            node, inputs=[data_0, data_1], outputs=[result], name="test_mean_two_inputs"
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/meanvariancenormalization.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/meanvariancenormalization.py
new file mode 100644
index 0000000000000000000000000000000000000000..241c2d53dafb8660f455560879e8d81354914e82
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/meanvariancenormalization.py
@@ -0,0 +1,49 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class MeanVarianceNormalization(Base):
+    """Test-case generator for the ONNX MeanVarianceNormalization operator."""
+
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "MeanVarianceNormalization", inputs=["X"], outputs=["Y"]
+        )
+
+        input_data = np.array(
+            [
+                [
+                    [[0.8439683], [0.5665144], [0.05836735]],
+                    [[0.02916367], [0.12964272], [0.5060197]],
+                    [[0.79538304], [0.9411346], [0.9546573]],
+                ],
+                [
+                    [[0.17730942], [0.46192095], [0.26480448]],
+                    [[0.6746842], [0.01665257], [0.62473077]],
+                    [[0.9240844], [0.9722341], [0.11965699]],
+                ],
+                [
+                    [[0.41356155], [0.9129373], [0.59330076]],
+                    [[0.81929934], [0.7862604], [0.11799799]],
+                    [[0.69248444], [0.54119414], [0.07513223]],
+                ],
+            ],
+            dtype=np.float32,
+        )
+
+        # Calculate expected output data:
+        # normalize over axes (0, 2, 3) — i.e. per channel (axis 1) —
+        # using std = sqrt(E[X^2] - E[X]^2), with 1e-9 guarding against
+        # division by zero.
+        data_mean = np.mean(input_data, axis=(0, 2, 3), keepdims=1)
+        data_mean_squared = np.power(data_mean, 2)
+        data_squared = np.power(input_data, 2)
+        data_squared_mean = np.mean(data_squared, axis=(0, 2, 3), keepdims=1)
+        std = np.sqrt(data_squared_mean - data_mean_squared)
+        expected_output = (input_data - data_mean) / (std + 1e-9)
+
+        expect(node, inputs=[input_data], outputs=[expected_output], name="test_mvn")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/melweightmatrix.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/melweightmatrix.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7c73540d4d0ba5b8b9afe27cad0571748fe1860
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/melweightmatrix.py
@@ -0,0 +1,90 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class MelWeightMatrix(Base):
+    """Test-case generator for the ONNX MelWeightMatrix operator: builds
+    the expected (num_spectrogram_bins x num_mel_bins) triangular-filter
+    weight matrix with plain numpy and compares against the op."""
+
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "MelWeightMatrix",
+            inputs=[
+                "num_mel_bins",
+                "dft_length",
+                "sample_rate",
+                "lower_edge_hertz",
+                "upper_edge_hertz",
+            ],
+            outputs=["output"],
+        )
+
+        num_mel_bins = np.int32(8)
+        dft_length = np.int32(16)
+        sample_rate = np.int32(8192)
+        lower_edge_hertz = np.float32(0)
+        upper_edge_hertz = np.float32(8192 / 2)
+
+        # One-sided spectrum size; num_mel_bins + 2 edge points (left,
+        # center, right for each triangular filter).
+        num_spectrogram_bins = dft_length // 2 + 1
+        frequency_bins = np.arange(0, num_mel_bins + 2)
+
+        # Hertz -> mel via the standard 2595 * log10(1 + f/700) formula,
+        # spaced uniformly in mel, then converted back to hertz and mapped
+        # to integer spectrogram-bin indices.
+        low_frequency_mel = 2595 * np.log10(1 + lower_edge_hertz / 700)
+        high_frequency_mel = 2595 * np.log10(1 + upper_edge_hertz / 700)
+        mel_step = (high_frequency_mel - low_frequency_mel) / frequency_bins.shape[0]
+
+        frequency_bins = frequency_bins * mel_step + low_frequency_mel
+        frequency_bins = 700 * (np.power(10, (frequency_bins / 2595)) - 1)
+        frequency_bins = ((dft_length + 1) * frequency_bins) // sample_rate
+        frequency_bins = frequency_bins.astype(int)
+
+        output = np.zeros((num_spectrogram_bins, num_mel_bins))
+        output.flags.writeable = True
+
+        # Fill each filter column: rise linearly from the left edge to the
+        # center, fall linearly from the center to the right edge.
+        for i in range(num_mel_bins):
+            lower_frequency_value = frequency_bins[i]  # left
+            center_frequency_point = frequency_bins[i + 1]  # center
+            higher_frequency_point = frequency_bins[i + 2]  # right
+            low_to_center = center_frequency_point - lower_frequency_value
+            if low_to_center == 0:
+                # Degenerate triangle (left == center): single unit spike.
+                output[center_frequency_point, i] = 1
+            else:
+                for j in range(lower_frequency_value, center_frequency_point + 1):
+                    output[j, i] = float(j - lower_frequency_value) / float(
+                        low_to_center
+                    )
+            center_to_high = higher_frequency_point - center_frequency_point
+            if center_to_high > 0:
+                for j in range(center_frequency_point, higher_frequency_point):
+                    output[j, i] = float(higher_frequency_point - j) / float(
+                        center_to_high
+                    )
+
+        # Expected output
+        # 1.000000, 1.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,
+        # 0.000000, 0.000000, 1.000000, 1.000000, 0.000000, 0.000000, 0.000000, 0.000000,
+        # 0.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000, 0.000000,
+        # 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000,
+        # 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000,
+        # 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.000000,
+        # 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,
+        # 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,
+        # 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,
+        output = output.astype(np.float32)
+        expect(
+            node,
+            inputs=[
+                num_mel_bins,
+                dft_length,
+                sample_rate,
+                lower_edge_hertz,
+                upper_edge_hertz,
+            ],
+            outputs=[output],
+            name="test_melweightmatrix",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/min.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/min.py
new file mode 100644
index 0000000000000000000000000000000000000000..82d407f07f9efe974fa3396fece988d54799f9e1
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/min.py
@@ -0,0 +1,66 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.backend.test.case.utils import all_numeric_dtypes
+
+
+class Min(Base):
+    """Test-case generators for the ONNX Min operator (element-wise
+    minimum over a variadic number of inputs)."""
+
+    @staticmethod
+    def export() -> None:
+        # Three inputs, one input (identity), and two inputs (np.minimum).
+        data_0 = np.array([3, 2, 1]).astype(np.float32)
+        data_1 = np.array([1, 4, 4]).astype(np.float32)
+        data_2 = np.array([2, 5, 0]).astype(np.float32)
+        result = np.array([1, 2, 0]).astype(np.float32)
+        node = onnx.helper.make_node(
+            "Min",
+            inputs=["data_0", "data_1", "data_2"],
+            outputs=["result"],
+        )
+        expect(
+            node,
+            inputs=[data_0, data_1, data_2],
+            outputs=[result],
+            name="test_min_example",
+        )
+
+        node = onnx.helper.make_node(
+            "Min",
+            inputs=["data_0"],
+            outputs=["result"],
+        )
+        expect(node, inputs=[data_0], outputs=[data_0], name="test_min_one_input")
+
+        result = np.minimum(data_0, data_1)
+        node = onnx.helper.make_node(
+            "Min",
+            inputs=["data_0", "data_1"],
+            outputs=["result"],
+        )
+        expect(
+            node, inputs=[data_0, data_1], outputs=[result], name="test_min_two_inputs"
+        )
+
+    @staticmethod
+    def export_min_all_numeric_types() -> None:
+        # One test per numeric dtype, named test_min_<dtype>.
+        for op_dtype in all_numeric_dtypes:
+            data_0 = np.array([3, 2, 1]).astype(op_dtype)
+            data_1 = np.array([1, 4, 4]).astype(op_dtype)
+            result = np.array([1, 2, 1]).astype(op_dtype)
+            node = onnx.helper.make_node(
+                "Min",
+                inputs=["data_0", "data_1"],
+                outputs=["result"],
+            )
+            expect(
+                node,
+                inputs=[data_0, data_1],
+                outputs=[result],
+                name=f"test_min_{np.dtype(op_dtype).name}",
+            )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/mish.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/mish.py
new file mode 100644
index 0000000000000000000000000000000000000000..0359ebb32d34858e0522bb762af8f0c1ab219d7e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/mish.py
@@ -0,0 +1,23 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Mish(Base):
+    """Test-case generator for the ONNX Mish operator."""
+
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node("Mish", inputs=["X"], outputs=["Y"])
+
+        input_data = np.linspace(-10, 10, 10000, dtype=np.float32)
+
+        # Calculate expected output data:
+        # mish(x) = x * tanh(softplus(x)), with softplus(x) = log(1 + e^x)
+        # computed via log1p(exp(x)).
+        expected_output = input_data * np.tanh(np.log1p(np.exp(input_data)))
+
+        expect(node, inputs=[input_data], outputs=[expected_output], name="test_mish")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/mod.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/mod.py
new file mode 100644
index 0000000000000000000000000000000000000000..c76613a022b5b0bf08fd71955169db96136674ff
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/mod.py
@@ -0,0 +1,177 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Mod(Base):
+    @staticmethod
+    def export_mod_mixed_sign_float64() -> None:
+        node = onnx.helper.make_node("Mod", inputs=["x", "y"], outputs=["z"], fmod=1)
+
+        x = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float64)
+        y = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float64)
+        z = np.fmod(x, y)  # expected output [-0.1,  0.4,  5. ,  0.1, -0.4,  3.]
+        expect(node, inputs=[x, y], outputs=[z], name="test_mod_mixed_sign_float64")
+
+    @staticmethod
+    def export_mod_mixed_sign_float32() -> None:
+        node = onnx.helper.make_node("Mod", inputs=["x", "y"], outputs=["z"], fmod=1)
+
+        x = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float32)
+        y = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float32)
+        z = np.fmod(
+            x, y
+        )  # expected output [-0.10000038, 0.39999962, 5. , 0.10000038, -0.39999962, 3.]
+        expect(node, inputs=[x, y], outputs=[z], name="test_mod_mixed_sign_float32")
+
+    @staticmethod
+    def export_mod_mixed_sign_float16() -> None:
+        node = onnx.helper.make_node("Mod", inputs=["x", "y"], outputs=["z"], fmod=1)
+
+        x = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float16)
+        y = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float16)
+        z = np.fmod(
+            x, y
+        )  # expected output [-0.10156, 0.3984 , 5. , 0.10156, -0.3984 ,  3.]
+        expect(node, inputs=[x, y], outputs=[z], name="test_mod_mixed_sign_float16")
+
+    @staticmethod
+    def export_mod_mixed_sign_int64() -> None:
+        node = onnx.helper.make_node(
+            "Mod",
+            inputs=["x", "y"],
+            outputs=["z"],
+        )
+
+        x = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int64)
+        y = np.array([2, -3, 8, -2, 3, 5]).astype(np.int64)
+        z = np.mod(x, y)  # expected output [ 0, -2,  5,  0,  2,  3]
+        expect(node, inputs=[x, y], outputs=[z], name="test_mod_mixed_sign_int64")
+
+    @staticmethod
+    def export_mod_mixed_sign_int32() -> None:
+        node = onnx.helper.make_node(
+            "Mod",
+            inputs=["x", "y"],
+            outputs=["z"],
+        )
+
+        x = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int32)
+        y = np.array([2, -3, 8, -2, 3, 5]).astype(np.int32)
+        z = np.mod(x, y)  # expected output [ 0, -2,  5,  0,  2,  3]
+        expect(node, inputs=[x, y], outputs=[z], name="test_mod_mixed_sign_int32")
+
+    @staticmethod
+    def export_mod_mixed_sign_int16() -> None:
+        node = onnx.helper.make_node(
+            "Mod",
+            inputs=["x", "y"],
+            outputs=["z"],
+        )
+
+        x = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int16)
+        y = np.array([2, -3, 8, -2, 3, 5]).astype(np.int16)
+        z = np.mod(x, y)  # expected output [ 0, -2,  5,  0,  2,  3]
+        expect(node, inputs=[x, y], outputs=[z], name="test_mod_mixed_sign_int16")
+
+    @staticmethod
+    def export_mod_mixed_sign_int8() -> None:
+        node = onnx.helper.make_node(
+            "Mod",
+            inputs=["x", "y"],
+            outputs=["z"],
+        )
+
+        x = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int8)
+        y = np.array([2, -3, 8, -2, 3, 5]).astype(np.int8)
+        z = np.mod(x, y)  # expected output [ 0, -2,  5,  0,  2,  3]
+        expect(node, inputs=[x, y], outputs=[z], name="test_mod_mixed_sign_int8")
+
+    @staticmethod
+    def export_mod_uint8() -> None:
+        node = onnx.helper.make_node(
+            "Mod",
+            inputs=["x", "y"],
+            outputs=["z"],
+        )
+
+        x = np.array([4, 7, 5]).astype(np.uint8)
+        y = np.array([2, 3, 8]).astype(np.uint8)
+        z = np.mod(x, y)  # expected output [0, 1, 5]
+        expect(node, inputs=[x, y], outputs=[z], name="test_mod_uint8")
+
+    @staticmethod
+    def export_mod_uint16() -> None:
+        node = onnx.helper.make_node(
+            "Mod",
+            inputs=["x", "y"],
+            outputs=["z"],
+        )
+
+        x = np.array([4, 7, 5]).astype(np.uint16)
+        y = np.array([2, 3, 8]).astype(np.uint16)
+        z = np.mod(x, y)  # expected output [0, 1, 5]
+        expect(node, inputs=[x, y], outputs=[z], name="test_mod_uint16")
+
+    @staticmethod
+    def export_mod_uint32() -> None:
+        node = onnx.helper.make_node(
+            "Mod",
+            inputs=["x", "y"],
+            outputs=["z"],
+        )
+
+        x = np.array([4, 7, 5]).astype(np.uint32)
+        y = np.array([2, 3, 8]).astype(np.uint32)
+        z = np.mod(x, y)  # expected output [0, 1, 5]
+        expect(node, inputs=[x, y], outputs=[z], name="test_mod_uint32")
+
+    @staticmethod
+    def export_mod_uint64() -> None:
+        node = onnx.helper.make_node(
+            "Mod",
+            inputs=["x", "y"],
+            outputs=["z"],
+        )
+
+        x = np.array([4, 7, 5]).astype(np.uint64)
+        y = np.array([2, 3, 8]).astype(np.uint64)
+        z = np.mod(x, y)  # expected output [0, 1, 5]
+        expect(node, inputs=[x, y], outputs=[z], name="test_mod_uint64")
+
+    @staticmethod
+    def export_mod_int64_fmod() -> None:
+        node = onnx.helper.make_node("Mod", inputs=["x", "y"], outputs=["z"], fmod=1)
+
+        x = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int64)
+        y = np.array([2, -3, 8, -2, 3, 5]).astype(np.int64)
+        z = np.fmod(x, y)  # expected output [ 0,  1,  5,  0, -1,  3]
+        expect(node, inputs=[x, y], outputs=[z], name="test_mod_int64_fmod")
+
+    @staticmethod
+    def export_mod_broadcast() -> None:
+        node = onnx.helper.make_node(
+            "Mod",
+            inputs=["x", "y"],
+            outputs=["z"],
+        )
+
+        x = np.arange(0, 30).reshape([3, 2, 5]).astype(np.int32)
+        y = np.array([7]).astype(np.int32)
+        z = np.mod(x, y)
+        #   array([[[0, 1, 2, 3, 4],
+        #     [5, 6, 0, 1, 2]],
+
+        #    [[3, 4, 5, 6, 0],
+        #     [1, 2, 3, 4, 5]],
+
+        #    [[6, 0, 1, 2, 3],
+        #     [4, 5, 6, 0, 1]]], dtype=int32)
+        expect(node, inputs=[x, y], outputs=[z], name="test_mod_broadcast")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/momentum.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/momentum.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e5c4f822b24d6e9272bc1abba6bc0dadcf9a652
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/momentum.py
@@ -0,0 +1,162 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.defs import AI_ONNX_PREVIEW_TRAINING_DOMAIN
+
+
+def apply_momentum(r, t, x, g, v, norm_coefficient, alpha, beta):
+    # Add gradient of regularization term.
+    g_regularized = norm_coefficient * x + g
+    # Coefficient of gradient should be 1 at the first iteration.
+    beta_adjusted = beta if t > 0 else 1
+    # Update momentum.
+    v_new = alpha * v + beta_adjusted * g_regularized
+    # Apply SG with momentum update rule.
+    x_new = x - r * v_new
+    return x_new, v_new
+
+
+def apply_nesterov(r, t, x, g, v, norm_coefficient, alpha, beta):
+    # Add gradient of regularization term.
+    g_regularized = norm_coefficient * x + g
+    # Coefficient of gradient should be 1 at the first iteration.
+    beta_adjusted = beta if t > 0 else 1
+    # Update momentum.
+    v_new = alpha * v + beta_adjusted * g_regularized
+    # Apply Nesterov with momentum update rule.
+    x_new = x - r * (g_regularized + alpha * v_new)
+    return x_new, v_new
+
+
+class Momentum(Base):
+    @staticmethod
+    def export_momentum() -> None:
+        # Define operator attributes.
+        norm_coefficient = 0.001
+        alpha = 0.95
+        beta = 0.1
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "Momentum",
+            inputs=["R", "T", "X", "G", "V"],
+            outputs=["X_new", "V_new"],
+            norm_coefficient=norm_coefficient,
+            alpha=alpha,
+            beta=beta,
+            mode="standard",
+            domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,
+        )
+
+        # Define operator inputs.
+        r = np.array(0.1, dtype=np.float32)  # scalar
+        t = np.array(0, dtype=np.int64)  # scalar
+        x = np.array([1.2, 2.8], dtype=np.float32)
+        g = np.array([-0.94, -2.5], dtype=np.float32)
+        v = np.array([1.7, 3.6], dtype=np.float32)
+
+        # Compute expected outputs of Momentum.
+        x_new, v_new = apply_momentum(r, t, x, g, v, norm_coefficient, alpha, beta)
+
+        # Check results.
+        expect(
+            node,
+            inputs=[r, t, x, g, v],
+            outputs=[x_new, v_new],
+            name="test_momentum",
+            opset_imports=[
+                onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)
+            ],
+        )
+
+    @staticmethod
+    def export_nesterov_momentum() -> None:
+        # Define operator attributes.
+        norm_coefficient = 0.01
+        alpha = 0.95
+        beta = 1.0
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "Momentum",
+            inputs=["R", "T", "X", "G", "V"],
+            outputs=["X_new", "V_new"],
+            norm_coefficient=norm_coefficient,
+            alpha=alpha,
+            beta=beta,
+            mode="nesterov",
+            domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,
+        )
+
+        # Define operator inputs.
+        r = np.array(0.1, dtype=np.float32)  # scalar
+        t = np.array(0, dtype=np.int64)  # scalar
+        x = np.array([1.2, 2.8], dtype=np.float32)
+        g = np.array([-0.94, -2.5], dtype=np.float32)
+        v = np.array([1.7, 3.6], dtype=np.float32)
+
+        # Compute expected outputs of Momentum.
+        x_new, v_new = apply_nesterov(r, t, x, g, v, norm_coefficient, alpha, beta)
+
+        # Check results.
+        expect(
+            node,
+            inputs=[r, t, x, g, v],
+            outputs=[x_new, v_new],
+            name="test_nesterov_momentum",
+            opset_imports=[
+                onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)
+            ],
+        )
+
+    @staticmethod
+    def export_momentum_multiple() -> None:
+        # Define operator attributes.
+        norm_coefficient = 0.001
+        alpha = 0.95
+        beta = 0.85
+
+        node = onnx.helper.make_node(
+            "Momentum",
+            inputs=["R", "T", "X1", "X2", "G1", "G2", "H1", "H2"],
+            outputs=["X1_new", "X2_new", "V1_new", "V2_new"],
+            norm_coefficient=norm_coefficient,
+            alpha=alpha,
+            beta=beta,
+            mode="standard",
+            domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,
+        )
+
+        # Define operator inputs.
+        r = np.array(0.1, dtype=np.float32)  # scalar
+        t = np.array(0, dtype=np.int64)  # scalar
+
+        x1 = np.array([1.0], dtype=np.float32)
+        g1 = np.array([-1.0], dtype=np.float32)
+        v1 = np.array([2.0], dtype=np.float32)
+
+        x2 = np.array([1.0, 2.0], dtype=np.float32)
+        g2 = np.array([-1.0, -3.0], dtype=np.float32)
+        v2 = np.array([4.0, 1.0], dtype=np.float32)
+
+        # Compute expected outputs of Momentum.
+        x1_new, v1_new = apply_momentum(r, t, x1, g1, v1, norm_coefficient, alpha, beta)
+        x2_new, v2_new = apply_momentum(r, t, x2, g2, v2, norm_coefficient, alpha, beta)
+
+        # Check results.
+        expect(
+            node,
+            inputs=[r, t, x1, x2, g1, g2, v1, v2],
+            outputs=[x1_new, x2_new, v1_new, v2_new],
+            name="test_momentum_multiple",
+            opset_imports=[
+                onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)
+            ],
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/mul.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/mul.py
new file mode 100644
index 0000000000000000000000000000000000000000..b279173b0f82c018c7e4c6af308e11de222e51f4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/mul.py
@@ -0,0 +1,73 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Mul(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Mul",
+            inputs=["x", "y"],
+            outputs=["z"],
+        )
+
+        x = np.array([1, 2, 3]).astype(np.float32)
+        y = np.array([4, 5, 6]).astype(np.float32)
+        z = x * y  # expected output [4., 10., 18.]
+        expect(node, inputs=[x, y], outputs=[z], name="test_mul_example")
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = np.random.randn(3, 4, 5).astype(np.float32)
+        z = x * y
+        expect(node, inputs=[x, y], outputs=[z], name="test_mul")
+
+        x = np.random.randint(4, size=(3, 4, 5), dtype=np.int8)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.int8)
+        z = x * y
+        expect(node, inputs=[x, y], outputs=[z], name="test_mul_int8")
+
+        x = np.random.randint(4, size=(3, 4, 5), dtype=np.int16)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.int16)
+        z = x * y
+        expect(node, inputs=[x, y], outputs=[z], name="test_mul_int16")
+
+        x = np.random.randint(4, size=(3, 4, 5), dtype=np.uint8)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint8)
+        z = x * y
+        expect(node, inputs=[x, y], outputs=[z], name="test_mul_uint8")
+
+        x = np.random.randint(4, size=(3, 4, 5), dtype=np.uint16)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint16)
+        z = x * y
+        expect(node, inputs=[x, y], outputs=[z], name="test_mul_uint16")
+
+        x = np.random.randint(4, size=(3, 4, 5), dtype=np.uint32)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint32)
+        z = x * y
+        expect(node, inputs=[x, y], outputs=[z], name="test_mul_uint32")
+
+        x = np.random.randint(4, size=(3, 4, 5), dtype=np.uint64)
+        y = np.random.randint(24, size=(3, 4, 5), dtype=np.uint64)
+        z = x * y
+        expect(node, inputs=[x, y], outputs=[z], name="test_mul_uint64")
+
+    @staticmethod
+    def export_mul_broadcast() -> None:
+        node = onnx.helper.make_node(
+            "Mul",
+            inputs=["x", "y"],
+            outputs=["z"],
+        )
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = np.random.randn(5).astype(np.float32)
+        z = x * y
+        expect(node, inputs=[x, y], outputs=[z], name="test_mul_bcast")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/neg.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/neg.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b86e155b6f97f644e50bc3cf292ff0907f6fdfe
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/neg.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Neg(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Neg",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.array([-4, 2]).astype(np.float32)
+        y = np.negative(x)  # expected output [4., -2.],
+        expect(node, inputs=[x], outputs=[y], name="test_neg_example")
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = np.negative(x)
+        expect(node, inputs=[x], outputs=[y], name="test_neg")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/negativeloglikelihoodloss.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/negativeloglikelihoodloss.py
new file mode 100644
index 0000000000000000000000000000000000000000..944c69b563b74184f8c8f2e099d2889291140509
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/negativeloglikelihoodloss.py
@@ -0,0 +1,586 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def compute_negative_log_likelihood_loss(
+    input, target, weight=None, reduction="mean", ignore_index=None
+):
+    input_shape = input.shape
+    if len(input_shape) == 1:
+        raise RuntimeError("Unsupported shape")
+
+    target_shape = target.shape
+    N = input_shape[0]
+    C = input_shape[1]
+
+    # initialize the positional weights when required
+    gather_weight = None
+    if weight is not None:
+        # setting mode='clip' to deal with ignore_index > C or < 0 cases.
+        # when the target value is > C or < 0, it doesn't matter which value we are
+        # taking in gather_weight, since it will be set to 0 in the following if-block
+        # use np.int32 to make it compatible with x86 machines
+        gather_weight = np.take(weight, np.array(target, dtype=np.int32), mode="clip")
+        # set `ignore_index`'s loss weight to 0.
+        # The loss tensor will be multiplied by this weight tensor,
+        # so `ignore_index`'s loss value will be eliminated.
+        if ignore_index is not None:
+            gather_weight = np.where(target == ignore_index, 0, gather_weight).astype(
+                dtype=np.float32
+            )
+    elif ignore_index is not None:
+        gather_weight = np.where(target == ignore_index, 0, 1).astype(dtype=np.float32)
+
+    # if input is 4-d and above, make it 3-d
+    if len(input_shape) != 3:
+        input = input.reshape((N, C, -1))
+        target = target.reshape((N, -1))
+
+    # Get a dimension from the reshaped input.
+    # If the original input shape is [N, C, H, W],
+    # the D here should be H * W because we reshape
+    # [N, C, H, W] to [N, C, H * W].
+    D = input.shape[2]
+    neg_gather_element_input = np.zeros((N, D), dtype=np.float32)
+    for i in range(N):
+        for d in range(D):
+            if target[i][d] != ignore_index:
+                neg_gather_element_input[i][d] = -input[i][target[i][d]][d]
+
+    loss = neg_gather_element_input
+
+    # if the input was 4-d or above reshape to the right shape
+    if len(input_shape) != 3:
+        loss = loss.reshape(target_shape)
+
+    # apply the weights when required
+    if gather_weight is not None:
+        loss = gather_weight * loss
+        if reduction == "mean":
+            loss = loss.sum() / gather_weight.sum()
+            return loss
+
+    if reduction == "mean":
+        loss = np.mean(loss)
+    elif reduction == "sum":
+        loss = np.sum(loss)
+    return loss
+
+
+class NegativeLogLikelihoodLoss(Base):
+    @staticmethod
+    def export_input_shape_is_NC() -> None:
+        reduction = "none"
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target"],
+            outputs=["loss"],
+            reduction=reduction,
+        )
+
+        N, C = 3, 5
+        np.random.seed(0)
+        input = np.random.rand(N, C).astype(np.float32)
+        target = np.random.randint(0, high=C, size=(N,)).astype(np.int64)
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, weight=None, reduction=reduction
+        )
+
+        expect(
+            node,
+            inputs=[input, target],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NC",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2() -> None:
+        reduction = "none"
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target"],
+            outputs=["loss"],
+            reduction=reduction,
+        )
+
+        N, C, dim1, dim2 = 3, 5, 6, 6
+        np.random.seed(0)
+        input = np.random.rand(N, C, dim1, dim2).astype(np.float32)
+        target = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, weight=None, reduction=reduction
+        )
+
+        expect(
+            node,
+            inputs=[input, target],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NCd1d2",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2_reduction_mean() -> None:
+        reduction = "mean"
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target"],
+            outputs=["loss"],
+            reduction=reduction,
+        )
+
+        N, C, dim1, dim2 = 3, 5, 6, 6
+        np.random.seed(0)
+        input = np.random.rand(N, C, dim1, dim2).astype(np.float32)
+        target = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, weight=None, reduction=reduction
+        )
+
+        expect(
+            node,
+            inputs=[input, target],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NCd1d2_reduction_mean",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2_reduction_sum() -> None:
+        reduction = "sum"
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target"],
+            outputs=["loss"],
+            reduction=reduction,
+        )
+
+        N, C, dim1, dim2 = 3, 5, 6, 6
+        np.random.seed(0)
+        input = np.random.rand(N, C, dim1, dim2).astype(np.float32)
+        target = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, weight=None, reduction=reduction
+        )
+
+        expect(
+            node,
+            inputs=[input, target],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NCd1d2_reduction_sum",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2_with_weight() -> None:
+        reduction = "none"
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target", "weight"],
+            outputs=["loss"],
+            reduction=reduction,
+        )
+
+        N, C, dim1, dim2 = 3, 5, 6, 6
+        np.random.seed(0)
+        input = np.random.rand(N, C, dim1, dim2).astype(np.float32)
+        target = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)
+        weight = np.random.rand(C).astype(np.float32)
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, weight=weight, reduction=reduction
+        )
+
+        expect(
+            node,
+            inputs=[input, target, weight],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NCd1d2_with_weight",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2_with_weight_reduction_mean() -> None:
+        reduction = "mean"
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target", "weight"],
+            outputs=["loss"],
+            reduction=reduction,
+        )
+
+        N, C, dim1, dim2 = 3, 5, 6, 6
+        np.random.seed(0)
+        input = np.random.rand(N, C, dim1, dim2).astype(np.float32)
+        target = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)
+        weight = np.random.rand(C).astype(np.float32)
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, weight=weight, reduction=reduction
+        )
+
+        expect(
+            node,
+            inputs=[input, target, weight],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NCd1d2_with_weight_reduction_mean",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2_with_weight_reduction_sum() -> None:
+        reduction = "sum"
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target", "weight"],
+            outputs=["loss"],
+            reduction=reduction,
+        )
+
+        N, C, dim1, dim2 = 3, 5, 6, 6
+        np.random.seed(0)
+        input = np.random.rand(N, C, dim1, dim2).astype(np.float32)
+        target = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)
+        weight = np.random.rand(C).astype(np.float32)
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, weight=weight, reduction=reduction
+        )
+
+        expect(
+            node,
+            inputs=[input, target, weight],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NCd1d2_with_weight_reduction_sum",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2_with_weight_reduction_sum_ii() -> None:
+        reduction = "sum"
+        ignore_index = np.int64(0)
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target", "weight"],
+            outputs=["loss"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        N, C, dim1, dim2 = 3, 5, 6, 6
+        np.random.seed(0)
+        input = np.random.rand(N, C, dim1, dim2).astype(np.float32)
+        target = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)
+        target[0][0][0] = np.int64(0)
+        weight = np.random.rand(C).astype(np.float32)
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, weight=weight, reduction=reduction, ignore_index=ignore_index
+        )
+
+        expect(
+            node,
+            inputs=[input, target, weight],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NCd1d2_with_weight_reduction_sum_ii",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2_no_weight_reduction_mean_ii() -> None:
+        reduction = "mean"
+        ignore_index = np.int64(1)
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target"],
+            outputs=["loss"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        N, C, dim1, dim2 = 3, 5, 6, 6
+        np.random.seed(0)
+        input = np.random.rand(N, C, dim1, dim2).astype(np.float32)
+        target = np.random.randint(0, high=C, size=(N, dim1, dim2)).astype(np.int64)
+        target[0][0][0] = np.int64(1)
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, reduction=reduction, ignore_index=ignore_index
+        )
+
+        expect(
+            node,
+            inputs=[input, target],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NCd1d2_no_weight_reduction_mean_ii",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1() -> None:
+        reduction = "mean"
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target"],
+            outputs=["loss"],
+            reduction=reduction,
+        )
+
+        N, C, d1 = 3, 5, 2
+        np.random.seed(0)
+        input = np.random.rand(N, C, d1).astype(np.float32)
+        target = np.random.randint(0, high=C, size=(N, d1)).astype(np.int64)
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, weight=None, reduction=reduction
+        )
+
+        expect(
+            node,
+            inputs=[input, target],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NCd1",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1_weight() -> None:
+        reduction = "mean"
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target", "weight"],
+            outputs=["loss"],
+            reduction=reduction,
+        )
+
+        N, C, d1 = 3, 5, 2
+        np.random.seed(0)
+        input = np.random.rand(N, C, d1).astype(np.float32)
+        target = np.random.randint(0, high=C, size=(N, d1)).astype(np.int64)
+        weight = np.random.rand(C).astype(np.float32)
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, weight=weight, reduction=reduction
+        )
+
+        expect(
+            node,
+            inputs=[input, target, weight],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NCd1_weight",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1_ii() -> None:
+        reduction = "mean"
+        ignore_index = np.int64(1)
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target"],
+            outputs=["loss"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        N, C, d1 = 3, 5, 2
+        np.random.seed(0)
+        input = np.random.rand(N, C, d1).astype(np.float32)
+        target = np.random.randint(0, high=C, size=(N, d1)).astype(np.int64)
+        target[0][0] = np.int64(1)
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, weight=None, reduction=reduction, ignore_index=ignore_index
+        )
+
+        expect(
+            node,
+            inputs=[input, target],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NCd1_ii",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1_weight_ii() -> None:
+        reduction = "mean"
+        ignore_index = np.int64(1)
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target", "weight"],
+            outputs=["loss"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        N, C, d1 = 3, 5, 2
+        np.random.seed(0)
+        input = np.random.rand(N, C, d1).astype(np.float32)
+        target = np.random.randint(0, high=C, size=(N, d1)).astype(np.int64)
+        target[0][0] = np.int64(1)
+        weight = np.random.rand(C).astype(np.float32)
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, weight=weight, reduction=reduction, ignore_index=ignore_index
+        )
+
+        expect(
+            node,
+            inputs=[input, target, weight],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NCd1_weight_ii",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2d3d4d5_mean_weight() -> None:
+        """NLLLoss test case: 7-D (N, C, d1..d5) input with class weights, mean reduction."""
+        reduction = "mean"
+
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target", "weight"],
+            outputs=["loss"],
+            reduction=reduction,
+        )
+
+        N, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4
+        np.random.seed(0)  # fixed seed so the exported expected output is reproducible
+        input = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)
+        target = np.random.randint(
+            0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)
+        ).astype(np.int64)
+        weight = np.random.rand(C).astype(np.float32)
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, weight=weight, reduction=reduction
+        )
+
+        expect(
+            node,
+            inputs=[input, target, weight],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NCd1d2d3d4d5_mean_weight",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2d3d4d5_none_no_weight() -> None:
+        """NLLLoss test case: 7-D (N, C, d1..d5) input, no weights, reduction='none'."""
+        reduction = "none"
+
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target"],
+            outputs=["loss"],
+            reduction=reduction,
+        )
+
+        N, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4
+        np.random.seed(0)  # fixed seed so the exported expected output is reproducible
+        input = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)
+        target = np.random.randint(
+            0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)
+        ).astype(np.int64)
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, reduction=reduction
+        )
+
+        expect(
+            node,
+            inputs=[input, target],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NCd1d2d3d4d5_none_no_weight",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1_mean_weight_negative_ii() -> None:
+        """NLLLoss test case: (N, C, d1) input, class weights, negative ignore_index (-1)."""
+        reduction = "mean"
+        ignore_index = np.int64(-1)
+
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target", "weight"],
+            outputs=["loss"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        N, C, dim1 = 3, 5, 6
+        np.random.seed(0)  # fixed seed so the exported expected output is reproducible
+        input = np.random.rand(N, C, dim1).astype(np.float32)
+        target = np.random.randint(0, high=C, size=(N, dim1)).astype(np.int64)
+        # Plant one negative target equal to ignore_index so the ignored path is exercised.
+        target[0][0] = -1
+        weight = np.random.rand(C).astype(np.float32)
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, weight=weight, reduction=reduction, ignore_index=ignore_index
+        )
+
+        expect(
+            node,
+            inputs=[input, target, weight],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NCd1_mean_weight_negative_ii",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2d3_none_no_weight_negative_ii() -> None:
+        """NLLLoss test case: 5-D input, no weights, reduction='none', ignore_index=-5."""
+        reduction = "none"
+        ignore_index = np.int64(-5)
+
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target"],
+            outputs=["loss"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        N, C, dim1, dim2, dim3 = 3, 5, 6, 6, 5
+        np.random.seed(0)  # fixed seed so the exported expected output is reproducible
+        input = np.random.rand(N, C, dim1, dim2, dim3).astype(np.float32)
+        target = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3)).astype(
+            np.int64
+        )
+        # Plant one target equal to ignore_index so the ignored path is exercised.
+        target[0][0][0][0] = -5
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, reduction=reduction, ignore_index=ignore_index
+        )
+
+        expect(
+            node,
+            inputs=[input, target],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NCd1d2d3_none_no_weight_negative_ii",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2d3_sum_weight_high_ii() -> None:
+        """NLLLoss test case: (N, C) input, weights, sum reduction, ignore_index above C."""
+        reduction = "sum"
+        # ignore_index (10) lies outside the valid class range [0, C); targets equal
+        # to it must be ignored rather than used to index input/weight.
+        ignore_index = np.int64(10)
+
+        node = onnx.helper.make_node(
+            "NegativeLogLikelihoodLoss",
+            inputs=["input", "target", "weight"],
+            outputs=["loss"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        N, C = 3, 5
+        np.random.seed(0)  # fixed seed so the exported expected output is reproducible
+        input = np.random.rand(N, C).astype(np.float32)
+        target = np.random.randint(0, high=C, size=(N)).astype(np.int64)
+        target[0] = 10
+        weight = np.random.rand(C).astype(np.float32)
+
+        negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
+            input, target, weight=weight, reduction=reduction, ignore_index=ignore_index
+        )
+
+        expect(
+            node,
+            inputs=[input, target, weight],
+            outputs=[negative_log_likelihood_loss],
+            name="test_nllloss_NCd1d2d3_sum_weight_high_ii",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/nonmaxsuppression.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/nonmaxsuppression.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ab4017835f93f5c7d49ff95eaeef0251e7a310a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/nonmaxsuppression.py
@@ -0,0 +1,420 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class NonMaxSuppression(Base):
+    """Test-case generators for the ONNX ``NonMaxSuppression`` operator.
+
+    Each exporter builds a node with the five standard inputs (boxes, scores,
+    max_output_boxes_per_class, iou_threshold, score_threshold), hard-codes the
+    expected ``selected_indices`` output ([batch, class, box_index] triples),
+    and emits the pair via ``expect``.
+    """
+
+    @staticmethod
+    def export_nonmaxsuppression_suppress_by_IOU() -> None:
+        """Boxes overlapping above iou_threshold are suppressed in score order."""
+        node = onnx.helper.make_node(
+            "NonMaxSuppression",
+            inputs=[
+                "boxes",
+                "scores",
+                "max_output_boxes_per_class",
+                "iou_threshold",
+                "score_threshold",
+            ],
+            outputs=["selected_indices"],
+        )
+        boxes = np.array(
+            [
+                [
+                    [0.0, 0.0, 1.0, 1.0],
+                    [0.0, 0.1, 1.0, 1.1],
+                    [0.0, -0.1, 1.0, 0.9],
+                    [0.0, 10.0, 1.0, 11.0],
+                    [0.0, 10.1, 1.0, 11.1],
+                    [0.0, 100.0, 1.0, 101.0],
+                ]
+            ]
+        ).astype(np.float32)
+        scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
+        max_output_boxes_per_class = np.array([3]).astype(np.int64)
+        iou_threshold = np.array([0.5]).astype(np.float32)
+        score_threshold = np.array([0.0]).astype(np.float32)
+        selected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)
+
+        expect(
+            node,
+            inputs=[
+                boxes,
+                scores,
+                max_output_boxes_per_class,
+                iou_threshold,
+                score_threshold,
+            ],
+            outputs=[selected_indices],
+            name="test_nonmaxsuppression_suppress_by_IOU",
+        )
+
+    @staticmethod
+    def export_nonmaxsuppression_suppress_by_IOU_and_scores() -> None:
+        """Same boxes as above, but score_threshold=0.4 additionally drops low scores."""
+        node = onnx.helper.make_node(
+            "NonMaxSuppression",
+            inputs=[
+                "boxes",
+                "scores",
+                "max_output_boxes_per_class",
+                "iou_threshold",
+                "score_threshold",
+            ],
+            outputs=["selected_indices"],
+        )
+        boxes = np.array(
+            [
+                [
+                    [0.0, 0.0, 1.0, 1.0],
+                    [0.0, 0.1, 1.0, 1.1],
+                    [0.0, -0.1, 1.0, 0.9],
+                    [0.0, 10.0, 1.0, 11.0],
+                    [0.0, 10.1, 1.0, 11.1],
+                    [0.0, 100.0, 1.0, 101.0],
+                ]
+            ]
+        ).astype(np.float32)
+        scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
+        max_output_boxes_per_class = np.array([3]).astype(np.int64)
+        iou_threshold = np.array([0.5]).astype(np.float32)
+        score_threshold = np.array([0.4]).astype(np.float32)
+        selected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)
+
+        expect(
+            node,
+            inputs=[
+                boxes,
+                scores,
+                max_output_boxes_per_class,
+                iou_threshold,
+                score_threshold,
+            ],
+            outputs=[selected_indices],
+            name="test_nonmaxsuppression_suppress_by_IOU_and_scores",
+        )
+
+    @staticmethod
+    def export_nonmaxsuppression_flipped_coordinates() -> None:
+        """Corner coordinates given in reversed order must yield the same selection."""
+        node = onnx.helper.make_node(
+            "NonMaxSuppression",
+            inputs=[
+                "boxes",
+                "scores",
+                "max_output_boxes_per_class",
+                "iou_threshold",
+                "score_threshold",
+            ],
+            outputs=["selected_indices"],
+        )
+        boxes = np.array(
+            [
+                [
+                    [1.0, 1.0, 0.0, 0.0],
+                    [0.0, 0.1, 1.0, 1.1],
+                    [0.0, 0.9, 1.0, -0.1],
+                    [0.0, 10.0, 1.0, 11.0],
+                    [1.0, 10.1, 0.0, 11.1],
+                    [1.0, 101.0, 0.0, 100.0],
+                ]
+            ]
+        ).astype(np.float32)
+        scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
+        max_output_boxes_per_class = np.array([3]).astype(np.int64)
+        iou_threshold = np.array([0.5]).astype(np.float32)
+        score_threshold = np.array([0.0]).astype(np.float32)
+        selected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)
+
+        expect(
+            node,
+            inputs=[
+                boxes,
+                scores,
+                max_output_boxes_per_class,
+                iou_threshold,
+                score_threshold,
+            ],
+            outputs=[selected_indices],
+            name="test_nonmaxsuppression_flipped_coordinates",
+        )
+
+    @staticmethod
+    def export_nonmaxsuppression_limit_output_size() -> None:
+        """max_output_boxes_per_class=2 caps the number of selections per class."""
+        node = onnx.helper.make_node(
+            "NonMaxSuppression",
+            inputs=[
+                "boxes",
+                "scores",
+                "max_output_boxes_per_class",
+                "iou_threshold",
+                "score_threshold",
+            ],
+            outputs=["selected_indices"],
+        )
+        boxes = np.array(
+            [
+                [
+                    [0.0, 0.0, 1.0, 1.0],
+                    [0.0, 0.1, 1.0, 1.1],
+                    [0.0, -0.1, 1.0, 0.9],
+                    [0.0, 10.0, 1.0, 11.0],
+                    [0.0, 10.1, 1.0, 11.1],
+                    [0.0, 100.0, 1.0, 101.0],
+                ]
+            ]
+        ).astype(np.float32)
+        scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
+        max_output_boxes_per_class = np.array([2]).astype(np.int64)
+        iou_threshold = np.array([0.5]).astype(np.float32)
+        score_threshold = np.array([0.0]).astype(np.float32)
+        selected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)
+
+        expect(
+            node,
+            inputs=[
+                boxes,
+                scores,
+                max_output_boxes_per_class,
+                iou_threshold,
+                score_threshold,
+            ],
+            outputs=[selected_indices],
+            name="test_nonmaxsuppression_limit_output_size",
+        )
+
+    @staticmethod
+    def export_nonmaxsuppression_single_box() -> None:
+        """Degenerate case: one box, one class — that box is always selected."""
+        node = onnx.helper.make_node(
+            "NonMaxSuppression",
+            inputs=[
+                "boxes",
+                "scores",
+                "max_output_boxes_per_class",
+                "iou_threshold",
+                "score_threshold",
+            ],
+            outputs=["selected_indices"],
+        )
+        boxes = np.array([[[0.0, 0.0, 1.0, 1.0]]]).astype(np.float32)
+        scores = np.array([[[0.9]]]).astype(np.float32)
+        max_output_boxes_per_class = np.array([3]).astype(np.int64)
+        iou_threshold = np.array([0.5]).astype(np.float32)
+        score_threshold = np.array([0.0]).astype(np.float32)
+        selected_indices = np.array([[0, 0, 0]]).astype(np.int64)
+
+        expect(
+            node,
+            inputs=[
+                boxes,
+                scores,
+                max_output_boxes_per_class,
+                iou_threshold,
+                score_threshold,
+            ],
+            outputs=[selected_indices],
+            name="test_nonmaxsuppression_single_box",
+        )
+
+    @staticmethod
+    def export_nonmaxsuppression_identical_boxes() -> None:
+        """Ten identical boxes collapse to a single selection (IOU = 1 for all pairs)."""
+        node = onnx.helper.make_node(
+            "NonMaxSuppression",
+            inputs=[
+                "boxes",
+                "scores",
+                "max_output_boxes_per_class",
+                "iou_threshold",
+                "score_threshold",
+            ],
+            outputs=["selected_indices"],
+        )
+        boxes = np.array(
+            [
+                [
+                    [0.0, 0.0, 1.0, 1.0],
+                    [0.0, 0.0, 1.0, 1.0],
+                    [0.0, 0.0, 1.0, 1.0],
+                    [0.0, 0.0, 1.0, 1.0],
+                    [0.0, 0.0, 1.0, 1.0],
+                    [0.0, 0.0, 1.0, 1.0],
+                    [0.0, 0.0, 1.0, 1.0],
+                    [0.0, 0.0, 1.0, 1.0],
+                    [0.0, 0.0, 1.0, 1.0],
+                    [0.0, 0.0, 1.0, 1.0],
+                ]
+            ]
+        ).astype(np.float32)
+        scores = np.array(
+            [[[0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]]]
+        ).astype(np.float32)
+        max_output_boxes_per_class = np.array([3]).astype(np.int64)
+        iou_threshold = np.array([0.5]).astype(np.float32)
+        score_threshold = np.array([0.0]).astype(np.float32)
+        selected_indices = np.array([[0, 0, 0]]).astype(np.int64)
+
+        expect(
+            node,
+            inputs=[
+                boxes,
+                scores,
+                max_output_boxes_per_class,
+                iou_threshold,
+                score_threshold,
+            ],
+            outputs=[selected_indices],
+            name="test_nonmaxsuppression_identical_boxes",
+        )
+
+    @staticmethod
+    def export_nonmaxsuppression_center_point_box_format() -> None:
+        """center_point_box=1: boxes encoded as [x_center, y_center, width, height]."""
+        node = onnx.helper.make_node(
+            "NonMaxSuppression",
+            inputs=[
+                "boxes",
+                "scores",
+                "max_output_boxes_per_class",
+                "iou_threshold",
+                "score_threshold",
+            ],
+            outputs=["selected_indices"],
+            center_point_box=1,
+        )
+        boxes = np.array(
+            [
+                [
+                    [0.5, 0.5, 1.0, 1.0],
+                    [0.5, 0.6, 1.0, 1.0],
+                    [0.5, 0.4, 1.0, 1.0],
+                    [0.5, 10.5, 1.0, 1.0],
+                    [0.5, 10.6, 1.0, 1.0],
+                    [0.5, 100.5, 1.0, 1.0],
+                ]
+            ]
+        ).astype(np.float32)
+        scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
+        max_output_boxes_per_class = np.array([3]).astype(np.int64)
+        iou_threshold = np.array([0.5]).astype(np.float32)
+        score_threshold = np.array([0.0]).astype(np.float32)
+        selected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)
+
+        expect(
+            node,
+            inputs=[
+                boxes,
+                scores,
+                max_output_boxes_per_class,
+                iou_threshold,
+                score_threshold,
+            ],
+            outputs=[selected_indices],
+            name="test_nonmaxsuppression_center_point_box_format",
+        )
+
+    @staticmethod
+    def export_nonmaxsuppression_two_classes() -> None:
+        """Suppression runs per class: each of the two classes gets its own picks."""
+        node = onnx.helper.make_node(
+            "NonMaxSuppression",
+            inputs=[
+                "boxes",
+                "scores",
+                "max_output_boxes_per_class",
+                "iou_threshold",
+                "score_threshold",
+            ],
+            outputs=["selected_indices"],
+        )
+        boxes = np.array(
+            [
+                [
+                    [0.0, 0.0, 1.0, 1.0],
+                    [0.0, 0.1, 1.0, 1.1],
+                    [0.0, -0.1, 1.0, 0.9],
+                    [0.0, 10.0, 1.0, 11.0],
+                    [0.0, 10.1, 1.0, 11.1],
+                    [0.0, 100.0, 1.0, 101.0],
+                ]
+            ]
+        ).astype(np.float32)
+        scores = np.array(
+            [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3], [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]
+        ).astype(np.float32)
+        max_output_boxes_per_class = np.array([2]).astype(np.int64)
+        iou_threshold = np.array([0.5]).astype(np.float32)
+        score_threshold = np.array([0.0]).astype(np.float32)
+        selected_indices = np.array(
+            [[0, 0, 3], [0, 0, 0], [0, 1, 3], [0, 1, 0]]
+        ).astype(np.int64)
+
+        expect(
+            node,
+            inputs=[
+                boxes,
+                scores,
+                max_output_boxes_per_class,
+                iou_threshold,
+                score_threshold,
+            ],
+            outputs=[selected_indices],
+            name="test_nonmaxsuppression_two_classes",
+        )
+
+    @staticmethod
+    def export_nonmaxsuppression_two_batches() -> None:
+        """Suppression runs per batch: each of the two batches gets its own picks."""
+        node = onnx.helper.make_node(
+            "NonMaxSuppression",
+            inputs=[
+                "boxes",
+                "scores",
+                "max_output_boxes_per_class",
+                "iou_threshold",
+                "score_threshold",
+            ],
+            outputs=["selected_indices"],
+        )
+        boxes = np.array(
+            [
+                [
+                    [0.0, 0.0, 1.0, 1.0],
+                    [0.0, 0.1, 1.0, 1.1],
+                    [0.0, -0.1, 1.0, 0.9],
+                    [0.0, 10.0, 1.0, 11.0],
+                    [0.0, 10.1, 1.0, 11.1],
+                    [0.0, 100.0, 1.0, 101.0],
+                ],
+                [
+                    [0.0, 0.0, 1.0, 1.0],
+                    [0.0, 0.1, 1.0, 1.1],
+                    [0.0, -0.1, 1.0, 0.9],
+                    [0.0, 10.0, 1.0, 11.0],
+                    [0.0, 10.1, 1.0, 11.1],
+                    [0.0, 100.0, 1.0, 101.0],
+                ],
+            ]
+        ).astype(np.float32)
+        scores = np.array(
+            [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]], [[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]
+        ).astype(np.float32)
+        max_output_boxes_per_class = np.array([2]).astype(np.int64)
+        iou_threshold = np.array([0.5]).astype(np.float32)
+        score_threshold = np.array([0.0]).astype(np.float32)
+        selected_indices = np.array(
+            [[0, 0, 3], [0, 0, 0], [1, 0, 3], [1, 0, 0]]
+        ).astype(np.int64)
+
+        expect(
+            node,
+            inputs=[
+                boxes,
+                scores,
+                max_output_boxes_per_class,
+                iou_threshold,
+                score_threshold,
+            ],
+            outputs=[selected_indices],
+            name="test_nonmaxsuppression_two_batches",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/nonzero.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/nonzero.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca2bf184d4f6920c545d1113182e18e12381fd97
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/nonzero.py
@@ -0,0 +1,26 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class NonZero(Base):
+    """Test-case generator for the ONNX ``NonZero`` operator."""
+
+    @staticmethod
+    def export() -> None:
+        """Emit one example; np.nonzero provides the reference output."""
+        node = onnx.helper.make_node(
+            "NonZero",
+            inputs=["condition"],
+            outputs=["result"],
+        )
+
+        condition = np.array([[1, 0], [1, 1]], dtype=bool)
+        result = np.array(
+            np.nonzero(condition), dtype=np.int64
+        )  # expected output [[0, 1, 1], [0, 0, 1]]
+        expect(node, inputs=[condition], outputs=[result], name="test_nonzero_example")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/not.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/not.py
new file mode 100644
index 0000000000000000000000000000000000000000..3843a7b9ab30c55f9425b8cdbe762b26f06c2151
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/not.py
@@ -0,0 +1,32 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Not(Base):
+    """Test-case generator for the ONNX ``Not`` operator (element-wise logical not)."""
+
+    @staticmethod
+    def export() -> None:
+        """Emit 2-D/3-D/4-D examples; np.logical_not provides the reference output."""
+        node = onnx.helper.make_node(
+            "Not",
+            inputs=["x"],
+            outputs=["not"],
+        )
+
+        # 2d
+        x = (np.random.randn(3, 4) > 0).astype(bool)
+        expect(node, inputs=[x], outputs=[np.logical_not(x)], name="test_not_2d")
+
+        # 3d
+        x = (np.random.randn(3, 4, 5) > 0).astype(bool)
+        expect(node, inputs=[x], outputs=[np.logical_not(x)], name="test_not_3d")
+
+        # 4d
+        x = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)
+        expect(node, inputs=[x], outputs=[np.logical_not(x)], name="test_not_4d")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/onehot.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/onehot.py
new file mode 100644
index 0000000000000000000000000000000000000000..aff549b2ea32984602b9e8d16d356d1f8f1a5c7f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/onehot.py
@@ -0,0 +1,126 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def one_hot(indices, depth, axis=-1, dtype=np.float32):
+    """Compute one hot from indices at a specific axis"""
+    # np.mod wraps negative (and otherwise out-of-range) indices into [0, depth),
+    # which is how negative indices end up supported below.
+    values = np.asarray(indices)
+    rank = len(values.shape)
+    depth_range = np.arange(depth)
+    if axis < 0:
+        # A negative axis counts positions in the OUTPUT, which has one extra
+        # (one-hot) dimension — hence rank + 1, not rank.
+        axis += rank + 1
+    ls = values.shape[0:axis]
+    rs = values.shape[axis:rank]
+    # Broadcast depth_range against the wrapped indices so equality testing
+    # produces the one-hot pattern along the chosen axis.
+    targets = np.reshape(
+        depth_range, (1,) * len(ls) + depth_range.shape + (1,) * len(rs)
+    )
+    values = np.reshape(np.mod(values, depth), (*ls, 1, *rs))
+    return np.asarray(targets == values, dtype=dtype)
+
+
+class OneHot(Base):
+    """Test-case generators for the ONNX ``OneHot`` operator.
+
+    Each exporter maps the raw 0/1 one-hot (from the local ``one_hot`` helper)
+    onto the requested off/on values via ``y * (on - off) + off``.
+    """
+
+    @staticmethod
+    def export_without_axis() -> None:
+        """Default axis (-1), int32 output with custom on/off values."""
+        on_value = 5
+        off_value = 2
+        output_type = np.int32
+        node = onnx.helper.make_node(
+            "OneHot", inputs=["indices", "depth", "values"], outputs=["y"]
+        )
+        indices = np.array([0, 7, 8], dtype=np.int64)
+        depth = np.float32(12)
+        values = np.array([off_value, on_value], dtype=output_type)
+        y = one_hot(indices, depth, dtype=output_type)
+        y = y * (on_value - off_value) + off_value
+        expect(
+            node,
+            inputs=[indices, depth, values],
+            outputs=[y],
+            name="test_onehot_without_axis",
+        )
+
+    @staticmethod
+    def export_with_axis() -> None:
+        """Explicit positive axis=1 with 2-D float indices."""
+        axisValue = 1
+        on_value = 3
+        off_value = 1
+        output_type = np.float32
+        node = onnx.helper.make_node(
+            "OneHot",
+            inputs=["indices", "depth", "values"],
+            outputs=["y"],
+            axis=axisValue,
+        )
+        indices = np.array([[1, 9], [2, 4]], dtype=np.float32)
+        depth = np.float32(10)
+        values = np.array([off_value, on_value], dtype=output_type)
+        y = one_hot(indices, depth, axis=axisValue, dtype=output_type)
+        y = y * (on_value - off_value) + off_value
+        expect(
+            node,
+            inputs=[indices, depth, values],
+            outputs=[y],
+            name="test_onehot_with_axis",
+        )
+
+    @staticmethod
+    def export_with_negative_indices() -> None:
+        """Negative indices wrap modulo depth (see ``one_hot``'s np.mod)."""
+        axisValue = 1
+        on_value = 3
+        off_value = 1
+        output_type = np.float32
+        node = onnx.helper.make_node(
+            "OneHot",
+            inputs=["indices", "depth", "values"],
+            outputs=["y"],
+            axis=axisValue,
+        )
+        indices = np.array([0, -7, -8], dtype=np.int64)
+
+        # print(y)
+        # [[3. 1. 1. 1. 1. 1. 1. 1. 1. 1.]
+        #  [1. 1. 1. 3. 1. 1. 1. 1. 1. 1.]
+        #  [1. 1. 3. 1. 1. 1. 1. 1. 1. 1.]]
+
+        depth = np.float32(10)
+        values = np.array([off_value, on_value], dtype=output_type)
+        y = one_hot(indices, depth, axis=axisValue, dtype=output_type)
+        y = y * (on_value - off_value) + off_value
+        expect(
+            node,
+            inputs=[indices, depth, values],
+            outputs=[y],
+            name="test_onehot_negative_indices",
+        )
+
+    @staticmethod
+    def export_with_negative_axis() -> None:
+        """Negative axis (-2) addressing the output's extra dimension."""
+        axisValue = -2
+        on_value = 3
+        off_value = 1
+        output_type = np.float32
+        node = onnx.helper.make_node(
+            "OneHot",
+            inputs=["indices", "depth", "values"],
+            outputs=["y"],
+            axis=axisValue,
+        )
+        indices = np.array([[1, 9], [2, 4]], dtype=np.float32)
+        depth = np.float32(10)
+        values = np.array([off_value, on_value], dtype=output_type)
+        y = one_hot(indices, depth, axis=axisValue, dtype=output_type)
+        y = y * (on_value - off_value) + off_value
+        expect(
+            node,
+            inputs=[indices, depth, values],
+            outputs=[y],
+            name="test_onehot_with_negative_axis",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/optionalgetelement.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/optionalgetelement.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d7568db9aa181fc267e3312677739f407594cdc
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/optionalgetelement.py
@@ -0,0 +1,80 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from typing import Any
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def optional_get_element_reference_implementation(optional: Any | None) -> Any:
+    """Reference for OptionalGetElement: unwrap a non-empty optional (identity here)."""
+    assert optional is not None
+    return optional
+
+
+class OptionalHasElement(Base):
+    """Test-case generators for the ONNX ``OptionalGetElement`` operator.
+
+    NOTE(review): the class name reads like a copy-paste from
+    optionalhaselement.py — the file and test names all say "GetElement";
+    confirm against upstream before relying on the name.
+    """
+
+    @staticmethod
+    def export_get_element_tensor() -> None:
+        """Unwrap a tensor, fed both as optional(tensor) and as a plain tensor."""
+        optional = np.array([1, 2, 3, 4]).astype(np.float32)
+        tensor_type_proto = onnx.helper.make_tensor_type_proto(
+            elem_type=onnx.TensorProto.FLOAT,
+            shape=[
+                4,
+            ],
+        )
+        optional_type_proto = onnx.helper.make_optional_type_proto(tensor_type_proto)
+
+        node = onnx.helper.make_node(
+            "OptionalGetElement", inputs=["optional_input"], outputs=["output"]
+        )
+        output = optional_get_element_reference_implementation(optional)
+        expect(
+            node,
+            inputs=[optional],
+            outputs=[output],
+            input_type_protos=[optional_type_proto],
+            name="test_optional_get_element_optional_tensor",
+        )
+        expect(
+            node,
+            inputs=[optional],
+            outputs=[output],
+            input_type_protos=[tensor_type_proto],
+            name="test_optional_get_element_tensor",
+        )
+
+    @staticmethod
+    def export_get_element_sequence() -> None:
+        """Unwrap a sequence, fed both as optional(sequence) and as a plain sequence."""
+        optional = [np.array([1, 2, 3, 4]).astype(np.int32)]
+        tensor_type_proto = onnx.helper.make_tensor_type_proto(
+            elem_type=onnx.TensorProto.INT32,
+            shape=[
+                4,
+            ],
+        )
+        seq_type_proto = onnx.helper.make_sequence_type_proto(tensor_type_proto)
+        optional_type_proto = onnx.helper.make_optional_type_proto(seq_type_proto)
+
+        node = onnx.helper.make_node(
+            "OptionalGetElement", inputs=["optional_input"], outputs=["output"]
+        )
+        output = optional_get_element_reference_implementation(optional)
+        expect(
+            node,
+            inputs=[optional],
+            outputs=[output],
+            input_type_protos=[optional_type_proto],
+            name="test_optional_get_element_optional_sequence",
+        )
+        expect(
+            node,
+            inputs=[optional],
+            outputs=[output],
+            input_type_protos=[seq_type_proto],
+            name="test_optional_get_element_sequence",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/optionalhaselement.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/optionalhaselement.py
new file mode 100644
index 0000000000000000000000000000000000000000..e24ac7f45a72fd64d9422bf6586399fd41615125
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/optionalhaselement.py
@@ -0,0 +1,94 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def optional_has_element_reference_implementation(
+    optional: np.ndarray | None,
+) -> np.ndarray:
+    """Reference for OptionalHasElement: scalar bool array, False iff input is None."""
+    if optional is None:
+        return np.array(False)
+    else:
+        return np.array(True)
+
+
+class OptionalHasElement(Base):
+    """Test-case generators for the ONNX ``OptionalHasElement`` operator."""
+
+    @staticmethod
+    def export() -> None:
+        """Non-empty input, typed either as a plain tensor or as optional(tensor)."""
+        optional = np.array([1, 2, 3, 4]).astype(np.float32)
+        tensor_type_proto = onnx.helper.make_tensor_type_proto(
+            elem_type=onnx.TensorProto.FLOAT,
+            shape=[
+                4,
+            ],
+        )
+        optional_type_proto = onnx.helper.make_optional_type_proto(tensor_type_proto)
+
+        # OptionalHasElement takes a tensor or optional as input
+        for input_type_protos in [tensor_type_proto, optional_type_proto]:
+            node = onnx.helper.make_node(
+                "OptionalHasElement", inputs=["optional_input"], outputs=["output"]
+            )
+            output = optional_has_element_reference_implementation(optional)
+            test_name = "test_optional_has_element_" + (
+                "optional_input"
+                if input_type_protos == optional_type_proto
+                else "tensor_input"
+            )
+            # NOTE(review): the loop variable `input_type_protos` is not passed on —
+            # expect() always receives [optional_type_proto], so the tensor_type_proto
+            # iteration only changes the test name. Looks unintended; confirm upstream.
+            expect(
+                node,
+                inputs=[optional],
+                outputs=[output],
+                input_type_protos=[optional_type_proto],
+                name=test_name,
+            )
+
+    @staticmethod
+    def export_empty() -> None:
+        """Empty optional input, exercised with named / empty-string / absent inputs."""
+        optional = None
+
+        tensor_type_proto = onnx.helper.make_tensor_type_proto(
+            elem_type=onnx.TensorProto.INT32, shape=[]
+        )
+        optional_type_proto = onnx.helper.make_optional_type_proto(tensor_type_proto)
+
+        # OptionalHasElement takes a tensor or optional as input
+        for input_type_proto in [tensor_type_proto, optional_type_proto]:
+            # Map test-name suffix -> input name variant ("surfix" kept as upstream spelling).
+            input_name_options = {
+                "empty": "optional_input",
+                "empty_no_input_name": "",
+                "empty_no_input": None,
+            }
+            for test_name_surfix, input_name in input_name_options.items():
+                if input_type_proto == tensor_type_proto and input_name:
+                    # the input tensor cannot be empty if input name is provided.
+                    continue
+                node = onnx.helper.make_node(
+                    "OptionalHasElement",
+                    inputs=[] if input_name is None else [input_name],
+                    outputs=["output"],
+                )
+                output = optional_has_element_reference_implementation(optional)
+                test_name = (
+                    "test_optional_has_element_"
+                    + test_name_surfix
+                    + (
+                        "_optional_input"
+                        if input_type_proto == optional_type_proto
+                        else "_tensor_input"
+                    )
+                )
+                expect(
+                    node,
+                    inputs=[optional] if input_name else [],
+                    outputs=[output],
+                    input_type_protos=[input_type_proto] if input_name else [],
+                    name=test_name,
+                )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/or.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/or.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0a86088d0f8be43bf09581ead4e25b76cc454ce
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/or.py
@@ -0,0 +1,76 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Or(Base):
    """Test-case generators for the element-wise boolean Or operator."""

    @staticmethod
    def export() -> None:
        """Same-shape Or on random boolean tensors of rank 2, 3 and 4."""
        node = onnx.helper.make_node(
            "Or",
            inputs=["x", "y"],
            outputs=["or"],
        )

        shape_cases = (
            ((3, 4), "2d"),
            ((3, 4, 5), "3d"),
            ((3, 4, 5, 6), "4d"),
        )
        for shape, tag in shape_cases:
            lhs = (np.random.randn(*shape) > 0).astype(bool)
            rhs = (np.random.randn(*shape) > 0).astype(bool)
            expect(
                node,
                inputs=[lhs, rhs],
                outputs=[np.logical_or(lhs, rhs)],
                name=f"test_or{tag}",
            )

    @staticmethod
    def export_or_broadcast() -> None:
        """Or with numpy-style broadcasting between mismatched-rank inputs."""
        node = onnx.helper.make_node(
            "Or",
            inputs=["x", "y"],
            outputs=["or"],
        )

        broadcast_cases = (
            ((3, 4, 5), (5,), "3v1d"),
            ((3, 4, 5), (4, 5), "3v2d"),
            ((3, 4, 5, 6), (5, 6), "4v2d"),
            ((3, 4, 5, 6), (4, 5, 6), "4v3d"),
            ((1, 4, 1, 6), (3, 1, 5, 6), "4v4d"),
        )
        for lhs_shape, rhs_shape, tag in broadcast_cases:
            lhs = (np.random.randn(*lhs_shape) > 0).astype(bool)
            rhs = (np.random.randn(*rhs_shape) > 0).astype(bool)
            expect(
                node,
                inputs=[lhs, rhs],
                outputs=[np.logical_or(lhs, rhs)],
                name=f"test_or_bcast{tag}",
            )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/pad.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/pad.py
new file mode 100644
index 0000000000000000000000000000000000000000..23e996f73c48942d3e3ba7c07df7458c91758599
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/pad.py
@@ -0,0 +1,132 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
def pad_impl(data, raw_pads, mode, constant_values=0.0, axes=None):
    """Reference Pad implementation built on :func:`numpy.pad`.

    Args:
        data: Input ndarray to pad.
        raw_pads: 1-D array in ONNX pad order
            ``[x1_begin, x2_begin, ..., x1_end, x2_end, ...]`` with one
            begin/end pair per entry of ``axes``.
        mode: numpy pad mode (``"constant"``, ``"edge"``, ``"reflect"``,
            ``"wrap"``, ...).
        constant_values: Fill value, used only when ``mode == "constant"``.
        axes: Axes to pad (negative values allowed); defaults to all axes.

    Returns:
        The padded ndarray.

    Raises:
        ValueError: If ``raw_pads`` does not hold exactly two entries per axis.
    """
    input_rank = data.ndim
    if axes is None:
        axes = list(range(input_rank))
    else:
        # Normalize negative axes once, up front; the loop below can then
        # index pad_width directly.
        axes = [axis if axis >= 0 else axis + input_rank for axis in axes]
    num_axes = len(axes)

    if num_axes * 2 != raw_pads.size:
        raise ValueError("The number of elements in raw_pads should be 2 * num_axes")

    # np.pad expects ((before_0, after_0), (before_1, after_1), ...);
    # axes not listed keep a zero-width pad.
    pad_width = [[0, 0] for _ in range(input_rank)]
    for i, axis in enumerate(axes):
        pad_width[axis] = [raw_pads[i], raw_pads[i + num_axes]]

    if mode == "constant":
        # Only "constant" mode accepts constant_values.
        return np.pad(
            data,
            pad_width=pad_width,
            mode=mode,
            constant_values=constant_values,
        )

    return np.pad(data, pad_width=pad_width, mode=mode)
+
+
class Pad(Base):
    """Test-case generators for the Pad operator (constant and non-constant modes)."""

    @staticmethod
    def export_constant_pad() -> None:
        """Constant-mode padding of all four axes with an explicit fill value."""
        node = onnx.helper.make_node(
            "Pad", inputs=["x", "pads", "value"], outputs=["y"], mode="constant"
        )
        data = np.random.randn(1, 3, 4, 5).astype(np.float32)
        # pads layout: [x1_begin, x2_begin, ..., x1_end, x2_end, ...]
        pads = np.array([0, 0, 1, 3, 0, 0, 2, 4], dtype=np.int64)
        fill = np.float32(1.2)
        expected = pad_impl(data, pads, "constant", 1.2)

        expect(node, inputs=[data, pads, fill], outputs=[expected], name="test_constant_pad")

    @staticmethod
    def export_reflection_edge_and_wrap_pad() -> None:
        """One case per non-constant mode; these take only data and pads."""
        for mode in ("edge", "reflect", "wrap"):
            node = onnx.helper.make_node(
                "Pad", inputs=["x", "pads"], outputs=["y"], mode=mode
            )
            data = np.random.randn(1, 3, 4, 5).astype(np.int32)
            # pads layout: [x1_begin, x2_begin, ..., x1_end, x2_end, ...]
            pads = np.array([0, 0, 1, 1, 0, 0, 1, 1], dtype=np.int64)
            expect(
                node,
                inputs=[data, pads],
                outputs=[pad_impl(data, pads, mode)],
                name=f"test_{mode}_pad",
            )

    @staticmethod
    def export_constant_pad_axes() -> None:
        """Constant padding restricted to a subset of axes via the axes input."""
        node = onnx.helper.make_node(
            "Pad", inputs=["x", "pads", "value", "axes"], outputs=["y"], mode="constant"
        )
        data = np.random.randn(1, 3, 4, 5).astype(np.float32)
        # Two padded axes, so pads holds [begin_a1, begin_a3, end_a1, end_a3].
        pads = np.array([0, 3, 0, 4], dtype=np.int64)
        fill = np.float32(1.2)
        padded_axes = [1, 3]
        axes = np.array(padded_axes, dtype=np.int64)
        expected = pad_impl(data, pads, "constant", 1.2, padded_axes)

        expect(
            node,
            inputs=[data, pads, fill, axes],
            outputs=[expected],
            name="test_constant_pad_axes",
        )

    @staticmethod
    def export_constant_pad_negative_axes() -> None:
        """Same as export_constant_pad_axes but exercising negative axis indices."""
        node = onnx.helper.make_node(
            "Pad", inputs=["x", "pads", "value", "axes"], outputs=["y"], mode="constant"
        )
        data = np.random.randn(1, 3, 4, 5).astype(np.float32)
        # Two padded axes, so pads holds [begin_a1, begin_a3, end_a1, end_a3].
        pads = np.array([0, 3, 0, 4], dtype=np.int64)
        fill = np.float32(1.2)
        padded_axes = [-3, -1]
        axes = np.array(padded_axes, dtype=np.int64)
        expected = pad_impl(data, pads, "constant", 1.2, padded_axes)

        expect(
            node,
            inputs=[data, pads, fill, axes],
            outputs=[expected],
            name="test_constant_pad_negative_axes",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/pow.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/pow.py
new file mode 100644
index 0000000000000000000000000000000000000000..29219c86a967a5a9c00d9638622990458640b1b7
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/pow.py
@@ -0,0 +1,107 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
def pow(x, y):  # type: ignore  # noqa: A001
    """Return ``x ** y`` cast back to x's dtype (Pow output follows the base)."""
    return np.power(x, y).astype(x.dtype)
+
+
class Pow(Base):
    """Test-case generators for the Pow operator."""

    @staticmethod
    def export() -> None:
        """Same-shape Pow on small known values and on random tensors."""
        node = onnx.helper.make_node(
            "Pow",
            inputs=["x", "y"],
            outputs=["z"],
        )

        base = np.array([1, 2, 3], dtype=np.float32)
        exponent = np.array([4, 5, 6], dtype=np.float32)
        # expected output [1., 32., 729.]
        expect(node, inputs=[base, exponent], outputs=[pow(base, exponent)], name="test_pow_example")

        base = np.arange(60).reshape(3, 4, 5).astype(np.float32)
        exponent = np.random.randn(3, 4, 5).astype(np.float32)
        expect(node, inputs=[base, exponent], outputs=[pow(base, exponent)], name="test_pow")

    @staticmethod
    def export_pow_broadcast() -> None:
        """Scalar and trailing-dimension broadcasting of the exponent."""
        node = onnx.helper.make_node(
            "Pow",
            inputs=["x", "y"],
            outputs=["z"],
        )

        base = np.array([1, 2, 3], dtype=np.float32)
        exponent = np.array(2, dtype=np.float32)
        # expected output [1., 4., 9.]
        expect(node, inputs=[base, exponent], outputs=[pow(base, exponent)], name="test_pow_bcast_scalar")

        node = onnx.helper.make_node(
            "Pow",
            inputs=["x", "y"],
            outputs=["z"],
        )
        base = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
        exponent = np.array([1, 2, 3], dtype=np.float32)
        # expected output [[1, 4, 27], [4, 25, 216]]
        expect(node, inputs=[base, exponent], outputs=[pow(base, exponent)], name="test_pow_bcast_array")

    @staticmethod
    def export_types() -> None:
        """Mixed-dtype Pow cases; the output dtype always follows the base."""
        node = onnx.helper.make_node(
            "Pow",
            inputs=["x", "y"],
            outputs=["z"],
        )

        dtype_cases = (
            (np.float32, np.int64, "float32_int64"),
            (np.int64, np.float32, "int64_float32"),
            (np.float32, np.int32, "float32_int32"),
            (np.int32, np.float32, "int32_float32"),
            (np.float32, np.uint64, "float32_uint64"),
            (np.float32, np.uint32, "float32_uint32"),
            (np.int64, np.int64, "int64_int64"),
            (np.int32, np.int32, "int32_int32"),
        )
        for base_dtype, exp_dtype, tag in dtype_cases:
            base = np.array([1, 2, 3]).astype(base_dtype)
            exponent = np.array([4, 5, 6]).astype(exp_dtype)
            # expected output [1, 32, 729] in the base's dtype
            expect(
                node,
                inputs=[base, exponent],
                outputs=[pow(base, exponent)],
                name=f"test_pow_types_{tag}",
            )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/prelu.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/prelu.py
new file mode 100644
index 0000000000000000000000000000000000000000..069d49550754736e62032fb60647a9221b621588
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/prelu.py
@@ -0,0 +1,40 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class PRelu(Base):
    """Test-case generators for the PRelu operator."""

    @staticmethod
    def export() -> None:
        """PRelu with a slope tensor of the same shape as the input."""
        node = onnx.helper.make_node(
            "PRelu",
            inputs=["x", "slope"],
            outputs=["y"],
        )

        data = np.random.randn(3, 4, 5).astype(np.float32)
        slope = np.random.randn(3, 4, 5).astype(np.float32)
        # Positive part passes through; negative part is scaled by slope.
        expected = np.clip(data, 0, np.inf) + np.clip(data, -np.inf, 0) * slope

        expect(node, inputs=[data, slope], outputs=[expected], name="test_prelu_example")

    @staticmethod
    def export_prelu_broadcast() -> None:
        """PRelu with a 1-D slope broadcast over the trailing axis."""
        node = onnx.helper.make_node(
            "PRelu",
            inputs=["x", "slope"],
            outputs=["y"],
        )

        data = np.random.randn(3, 4, 5).astype(np.float32)
        slope = np.random.randn(5).astype(np.float32)
        # Positive part passes through; negative part is scaled by slope.
        expected = np.clip(data, 0, np.inf) + np.clip(data, -np.inf, 0) * slope

        expect(node, inputs=[data, slope], outputs=[expected], name="test_prelu_broadcast")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/qlinearconv.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/qlinearconv.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff547803d1beee43113e09911181a7af195e9515
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/qlinearconv.py
@@ -0,0 +1,82 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class QLinearConv(Base):
    """Test-case generator for the QLinearConv operator."""

    @staticmethod
    def export() -> None:
        """Emit ``test_qlinearconv``: a quantized 1x1 convolution on a uint8 7x7 input.

        All tensors, scales and zero points below are fixed reference
        constants; the expected output is kept verbatim rather than
        re-derived.
        """
        node = onnx.helper.make_node(
            "QLinearConv",
            inputs=[
                "x",
                "x_scale",
                "x_zero_point",
                "w",
                "w_scale",
                "w_zero_point",
                "y_scale",
                "y_zero_point",
            ],
            outputs=["y"],
        )

        # Quantized input, shape (N=1, C=1, H=7, W=7).
        x = np.array(
            [
                [255, 174, 162, 25, 203, 168, 58],
                [15, 59, 237, 95, 129, 0, 64],
                [56, 242, 153, 221, 168, 12, 166],
                [232, 178, 186, 195, 237, 162, 237],
                [188, 39, 124, 77, 80, 102, 43],
                [127, 230, 21, 83, 41, 40, 134],
                [255, 154, 92, 141, 42, 148, 247],
            ],
            dtype=np.uint8,
        ).reshape((1, 1, 7, 7))

        x_scale = np.float32(0.00369204697)
        x_zero_point = np.uint8(132)

        # Single 1x1 kernel, so the output spatial size matches the input.
        w = np.array([0], dtype=np.uint8).reshape((1, 1, 1, 1))

        w_scale = np.array([0.00172794575], dtype=np.float32)
        w_zero_point = np.array([255], dtype=np.uint8)

        y_scale = np.float32(0.00162681262)
        y_zero_point = np.uint8(123)

        # Precomputed expected output, same (1, 1, 7, 7) shape as the input.
        output = np.array(
            [
                [0, 81, 93, 230, 52, 87, 197],
                [240, 196, 18, 160, 126, 255, 191],
                [199, 13, 102, 34, 87, 243, 89],
                [23, 77, 69, 60, 18, 93, 18],
                [67, 216, 131, 178, 175, 153, 212],
                [128, 25, 234, 172, 214, 215, 121],
                [0, 101, 163, 114, 213, 107, 8],
            ],
            dtype=np.uint8,
        ).reshape((1, 1, 7, 7))

        expect(
            node,
            inputs=[
                x,
                x_scale,
                x_zero_point,
                w,
                w_scale,
                w_zero_point,
                y_scale,
                y_zero_point,
            ],
            outputs=[output],
            name="test_qlinearconv",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/qlinearmatmul.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/qlinearmatmul.py
new file mode 100644
index 0000000000000000000000000000000000000000..efd5fd68ed348248d357bdeef538a6fee5c59dba
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/qlinearmatmul.py
@@ -0,0 +1,157 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class QLinearMatMul(Base):
    """Test-case generator for the QLinearMatMul operator."""

    @staticmethod
    def export_int() -> None:
        """Emit 2D and 3D quantized matmul cases for each combination of
        quantized element type (uint8/int8) and scale dtype (float32/float16).

        The expected outputs are precomputed reference constants matching the
        exact inputs built below.
        """
        for quant_type_name in ["uint8", "int8"]:
            quant_type = getattr(np, quant_type_name)
            for dtype_name in ["float32", "float16"]:
                dtype = getattr(np, dtype_name)
                node = onnx.helper.make_node(
                    "QLinearMatMul",
                    inputs=[
                        "a",
                        "a_scale",
                        "a_zero_point",
                        "b",
                        "b_scale",
                        "b_zero_point",
                        "y_scale",
                        "y_zero_point",
                    ],
                    outputs=["y"],
                )

                # 2D
                a = np.array([[208, 236, 0, 238], [3, 214, 255, 29]])
                if quant_type == np.int8:
                    # Shift the uint8 sample values into int8 range.
                    a -= 127
                a = a.astype(quant_type)

                a_scale = np.array([0.0066], dtype=dtype)
                a_zero_point = np.array(
                    [113 - 127] if quant_type == np.int8 else [113], dtype=quant_type
                )

                b = np.array(
                    [[152, 51, 244], [60, 26, 255], [0, 127, 246], [127, 254, 247]]
                )
                if quant_type == np.int8:
                    b -= 127
                b = b.astype(quant_type)

                b_scale = np.array([0.00705], dtype=dtype)
                b_zero_point = np.array(
                    [114 - 127] if quant_type == np.int8 else [114], dtype=quant_type
                )

                y_scale = np.array([0.0107], dtype=dtype)
                y_zero_point = np.array(
                    [118 - 127] if quant_type == np.int8 else [118], dtype=quant_type
                )

                # Precomputed expected output for the inputs above.
                if quant_type == np.int8:
                    output = np.array([[41, -12, -9], [1, -75, 20]])
                else:
                    output = np.array([[168, 115, 255], [1, 66, 151]])
                output = output.astype(quant_type)

                expect(
                    node,
                    inputs=[
                        a,
                        a_scale,
                        a_zero_point,
                        b,
                        b_scale,
                        b_zero_point,
                        y_scale,
                        y_zero_point,
                    ],
                    outputs=[output],
                    name=f"test_qlinearmatmul_2D_{quant_type_name}_{dtype_name}",
                )

                # 3D: the 2D operands stacked twice along a leading batch axis.
                a = np.array(
                    [
                        [[208, 236, 0, 238], [3, 214, 255, 29]],
                        [[208, 236, 0, 238], [3, 214, 255, 29]],
                    ],
                )
                if quant_type == np.int8:
                    a -= 127
                a = a.astype(quant_type)

                a_scale = np.array([0.0066], dtype=dtype)
                a_zero_point = np.array(
                    [113 - 127] if quant_type == np.int8 else [113], dtype=quant_type
                )

                b = np.array(
                    [
                        [[152, 51, 244], [60, 26, 255], [0, 127, 246], [127, 254, 247]],
                        [[152, 51, 244], [60, 26, 255], [0, 127, 246], [127, 254, 247]],
                    ],
                )
                if quant_type == np.int8:
                    b -= 127
                b = b.astype(quant_type)

                b_scale = np.array([0.00705], dtype=dtype)
                # NOTE(review): unlike the 2D case above, this zero point is not
                # shifted by -127 for int8. The expected outputs below were
                # generated with this exact value, so it is kept as-is — confirm
                # upstream whether the asymmetry is intentional.
                b_zero_point = np.array([114], dtype=quant_type)

                y_scale = np.array([0.0107], dtype=dtype)
                y_zero_point = np.array(
                    [118 - 127] if quant_type == np.int8 else [118], dtype=quant_type
                )

                # Precomputed expected outputs; the int8 results differ slightly
                # between float32 and float16 scales.
                if quant_type == np.int8:
                    if dtype == np.float32:
                        output = np.array(
                            [
                                [[-86, 117, 120], [115, 39, -121]],
                                [[-86, 117, 120], [115, 39, -121]],
                            ]
                        )
                    else:
                        output = np.array(
                            [
                                [[-86, 116, 119], [115, 39, -121]],
                                [[-86, 116, 119], [115, 39, -121]],
                            ]
                        )
                else:
                    output = np.array(
                        [
                            [[168, 115, 255], [1, 66, 151]],
                            [[168, 115, 255], [1, 66, 151]],
                        ]
                    )
                output = output.astype(quant_type)

                expect(
                    node,
                    inputs=[
                        a,
                        a_scale,
                        a_zero_point,
                        b,
                        b_scale,
                        b_zero_point,
                        y_scale,
                        y_zero_point,
                    ],
                    outputs=[output],
                    name=f"test_qlinearmatmul_3D_{quant_type_name}_{dtype_name}",
                )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/quantizelinear.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/quantizelinear.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb9f286c51651b21760dc4203c4ba10320bb5467
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/quantizelinear.py
@@ -0,0 +1,438 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx import TensorProto
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.helper import make_tensor
+
+
class QuantizeLinear(Base):
    """Test-case generators for QuantizeLinear across output dtypes
    (uint8/uint16/int16, float8, 4-bit types), per-axis scales, and
    blocked (sub-axis) quantization."""

    @staticmethod
    def export() -> None:
        """Per-tensor uint8 quantization with scale 2 and zero point 128."""
        node = onnx.helper.make_node(
            "QuantizeLinear",
            inputs=["x", "y_scale", "y_zero_point"],
            outputs=["y"],
        )

        x = np.array([0, 2, 3, 1000, -254, -1000]).astype(np.float32)
        y_scale = np.float32(2)
        y_zero_point = np.uint8(128)
        # Out-of-range results saturate to the uint8 limits (255 and 0).
        y = np.array([128, 129, 130, 255, 1, 0]).astype(np.uint8)

        expect(
            node,
            inputs=[x, y_scale, y_zero_point],
            outputs=[y],
            name="test_quantizelinear",
        )

    @staticmethod
    def export_axis() -> None:
        """Per-channel quantization: scale/zero-point have one entry per
        channel and are broadcast as (1, 3, 1, 1) against the input."""
        node = onnx.helper.make_node(
            "QuantizeLinear",
            inputs=["x", "y_scale", "y_zero_point"],
            outputs=["y"],
        )

        x = np.array(
            [
                [
                    [[-162, 10], [-100, 232], [-20, -50]],
                    [[-76, 0], [0, 252], [32, -44]],
                    [[245, -485], [-960, -270], [-375, -470]],
                ],
            ],
            dtype=np.float32,
        )
        y_scale = np.array([2, 4, 5], dtype=np.float32)
        y_zero_point = np.array([84, 24, 196], dtype=np.uint8)
        y = (x / y_scale.reshape(1, 3, 1, 1) + y_zero_point.reshape(1, 3, 1, 1)).astype(
            np.uint8
        )

        expect(
            node,
            inputs=[x, y_scale, y_zero_point],
            outputs=[y],
            name="test_quantizelinear_axis",
        )

    @staticmethod
    def export_e4m3fn() -> None:
        """Quantization to float8 E4M3FN; 100000/2 saturates to 448."""
        node = onnx.helper.make_node(
            "QuantizeLinear",
            inputs=["x", "y_scale", "y_zero_point"],
            outputs=["y"],
        )

        x = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32)
        y_scale = np.float32(2)
        y_zero_point = make_tensor("y_zero_point", TensorProto.FLOAT8E4M3FN, [1], [0])
        y = make_tensor("y", TensorProto.FLOAT8E4M3FN, [5], [0, 0.5, 1, 448, 96])

        expect(
            node,
            inputs=[x, y_scale, y_zero_point],
            outputs=[y],
            name="test_quantizelinear_e4m3fn",
        )

    @staticmethod
    def export_e5m2() -> None:
        """Quantization to float8 E5M2 (wider range than E4M3FN)."""
        node = onnx.helper.make_node(
            "QuantizeLinear",
            inputs=["x", "y_scale", "y_zero_point"],
            outputs=["y"],
        )

        x = np.array([0.0, 1.0, 2.0, 100000.0, 200.0]).astype(np.float32)
        y_scale = np.float32(2)
        y_zero_point = make_tensor("y_zero_point", TensorProto.FLOAT8E5M2, [1], [0.0])
        y = make_tensor("y", TensorProto.FLOAT8E5M2, [5], [0, 0.5, 1, 49152, 96])

        expect(
            node,
            inputs=[x, y_scale, y_zero_point],
            outputs=[y],
            name="test_quantizelinear_e5m2",
        )

    @staticmethod
    def export_uint16() -> None:
        """uint16 output with a mid-range zero point; exercises rounding near
        .5 boundaries and saturation at 0 / 65535.

        NOTE(review): the expected values (e.g. 3.0 -> 32769, 2.9 -> 32768)
        are consistent with rounding x/scale half-to-even *before* adding the
        zero point — confirm against the operator spec.
        """
        node = onnx.helper.make_node(
            "QuantizeLinear",
            inputs=["x", "y_scale", "y_zero_point"],
            outputs=["y"],
        )

        x = np.array(
            [
                0.0,
                -128.0,
                3.0,
                -3.0,
                2.9,
                -2.9,
                3.1,
                -3.1,
                65536.0,
                -65534.0,
                70000.0,
                -70000.0,
            ]
        ).astype(np.float32)
        y_scale = np.float32(2.0)
        y_zero_point = np.uint16(32767)
        y = np.array(
            [
                32767,
                32703,
                32769,
                32765,
                32768,
                32766,
                32769,
                32765,
                65535,
                0,
                65535,
                0,
            ]
        ).astype(np.uint16)

        expect(
            node,
            inputs=[x, y_scale, y_zero_point],
            outputs=[y],
            name="test_quantizelinear_uint16",
        )

    @staticmethod
    def export_int16() -> None:
        """int16 output with a positive zero point; exercises rounding near
        .5 boundaries and saturation at -32768 / 32767."""
        node = onnx.helper.make_node(
            "QuantizeLinear",
            inputs=["x", "y_scale", "y_zero_point"],
            outputs=["y"],
        )

        x = np.array(
            [
                0.0,
                -514.0,
                3.0,
                -3.0,
                2.9,
                -2.9,
                3.1,
                -3.1,
                65022.0,
                -66046.0,
                65023.0,
                -66047.0,
                65024.0,
                -66048.0,
                70000.0,
                -70000.0,
            ]
        ).astype(np.float32)
        y_scale = np.float32(2.0)
        y_zero_point = np.int16(256)
        y = np.array(
            [
                256,
                -1,
                258,
                254,
                257,
                255,
                258,
                254,
                32767,
                -32767,
                32767,
                -32768,
                32767,
                -32768,
                32767,
                -32768,
            ]
        ).astype(np.int16)

        expect(
            node,
            inputs=[x, y_scale, y_zero_point],
            outputs=[y],
            name="test_quantizelinear_int16",
        )

    @staticmethod
    def export_uint4() -> None:
        """Per-row (axis=0) quantization to 4-bit unsigned with zero point 1."""
        node = onnx.helper.make_node(
            "QuantizeLinear",
            inputs=["x", "y_scale", "y_zero_point"],
            outputs=["y"],
            axis=0,
        )

        x = np.array(
            [
                [0.0, 2.5, 4.8, 8.6],
                [-30, -20, 6, 9],
                [12, 15, 16, 40],
            ]
        ).astype(np.float32)

        y_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32)
        y_zero_point = make_tensor(
            "y_zero_point", TensorProto.UINT4, y_scale.shape, np.ones_like(y_scale)
        )
        # Expected values saturate to the UINT4 range [0, 15].
        y = make_tensor(
            "y", TensorProto.UINT4, x.shape, [1, 2, 3, 5, 0, 0, 3, 4, 4, 5, 5, 11]
        )

        expect(
            node,
            inputs=[x, y_scale, y_zero_point],
            outputs=[y],
            name="test_quantizelinear_uint4",
        )

    @staticmethod
    def export_int4() -> None:
        """Per-row (axis=0) quantization to 4-bit signed with zero point 1."""
        node = onnx.helper.make_node(
            "QuantizeLinear",
            inputs=["x", "y_scale", "y_zero_point"],
            outputs=["y"],
            axis=0,
        )

        x = np.array(
            [
                [0.0, 2.5, 4.8, 8.6],
                [-30, -20, 6, 9],
                [12, 15, 16, 40],
            ]
        ).astype(np.float32)

        y_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32)
        y_zero_point = make_tensor(
            "y_zero_point", TensorProto.INT4, y_scale.shape, np.ones_like(y_scale)
        )
        # Expected values saturate to the INT4 range [-8, 7].
        y = make_tensor(
            "y", TensorProto.INT4, x.shape, [1, 2, 3, 5, -8, -6, 3, 4, 4, 5, 5, 7]
        )

        expect(
            node,
            inputs=[x, y_scale, y_zero_point],
            outputs=[y],
            name="test_quantizelinear_int4",
        )

    @staticmethod
    def export_float4e2m1() -> None:
        """Per-row (axis=0) quantization to the 4-bit float type FLOAT4E2M1."""
        node = onnx.helper.make_node(
            "QuantizeLinear",
            inputs=["x", "y_scale", "y_zero_point"],
            outputs=["y"],
            axis=0,
        )

        x = np.array(
            [
                [0.0, 2.5, 4.8, 8.6],
                [-30, -20, 6, 9],
                [-0.0, -2.5, -4.8, -8.6],
            ]
        ).astype(np.float32)

        y_scale = np.asarray([2.0, 3.0, 4.0], dtype=np.float32)
        y_zero_point = make_tensor(
            "y_zero_point",
            TensorProto.FLOAT4E2M1,
            y_scale.shape,
            np.zeros_like(y_scale),
        )
        y = make_tensor(
            "y",
            TensorProto.FLOAT4E2M1,
            x.shape,
            [0, 1, 2, 4, -6, -6, 2, 3, 0, -0.5, -1, -2],
        )

        expect(
            node,
            inputs=[x, y_scale, y_zero_point],
            outputs=[y],
            name="test_quantizelinear_float4e2m1",
        )

    @staticmethod
    def export_blocked_asymmetric() -> None:
        """Blocked quantization along axis 1 with block_size=2: each scale /
        zero-point entry covers a block of 2 input elements, so the reference
        result is computed by repeating them element-wise."""
        node = onnx.helper.make_node(
            "QuantizeLinear",
            inputs=["x", "y_scale", "y_zero_point"],
            outputs=["y"],
            axis=1,
            block_size=2,
        )

        x = np.array(
            [
                [6.0, 12.0, 50.0, 5.0],
                [1.0, 8.0, 4.0, 5.0],
                [0.0, 20.0, 10.0, 4.0],
            ],
            dtype=np.float32,
        )
        y_scale = np.array(
            [
                [1.5, 2.5],
                [3.0, 4.9],
                [5.1, 6.9],
            ],
            dtype=np.float32,
        )
        y_zero_point = np.array(
            [
                [0, 1],
                [1, 0],
                [2, 3],
            ],
            dtype=np.uint8,
        )
        # x.shape = (3, 4)
        # y_scale.shape = (3, 2)
        assert y_scale.shape == y_zero_point.shape
        block_axis = 1
        # The block shape is [x.shape[i] // y_scale.shape[i] for i in range(len(x.shape))] = (1, 2)
        assert all(
            x.shape[i] == y_scale.shape[i]
            for i in range(len(x.shape))
            if i != block_axis
        )
        assert x.shape[block_axis] % y_scale.shape[block_axis] == 0
        repeats = x.shape[block_axis] // y_scale.shape[block_axis]

        # Create element-wise scale and zero point
        y_scale_elementwise = np.repeat(y_scale, repeats=repeats, axis=block_axis)
        y_zero_point_elementwise = np.repeat(
            y_zero_point, repeats=repeats, axis=block_axis
        )

        y = np.rint(x / y_scale_elementwise + y_zero_point_elementwise).astype(np.uint8)

        expect(
            node,
            inputs=[x, y_scale, y_zero_point],
            outputs=[y],
            name="test_quantizelinear_blocked_asymmetric",
        )

    @staticmethod
    def export_blocked_symmetric() -> None:
        """Blocked symmetric quantization (no zero point) to int16, with the
        output dtype selected via the output_dtype attribute."""
        node = onnx.helper.make_node(
            "QuantizeLinear",
            inputs=["x", "y_scale"],
            outputs=["y"],
            axis=1,
            block_size=2,
            output_dtype=TensorProto.INT16,
        )

        x = np.array(
            [
                [6.0, -8, -10, 5.0],
                [1.0, 8.0, 4.0, 5.0],
                [0.0, 20.0, 10.0, 4.0],
            ],
            dtype=np.float32,
        )

        y_scale = np.array(
            [
                [1.5, 2.5],
                [3.0, 4.9],
                [5.1, 6.9],
            ],
            dtype=np.float32,
        )

        # x.shape = (3, 4)
        # y_scale.shape = (3, 2)

        block_axis = 1
        # The block shape is [x.shape[i] // y_scale.shape[i] for i in range(len(x.shape))] = (1, 2)
        assert all(
            x.shape[i] == y_scale.shape[i]
            for i in range(len(x.shape))
            if i != block_axis
        )
        assert x.shape[block_axis] % y_scale.shape[block_axis] == 0
        repeats = x.shape[block_axis] // y_scale.shape[block_axis]

        # Create element-wise scale and zero point
        y_scale_elementwise = np.repeat(y_scale, repeats=repeats, axis=block_axis)

        # Clip to the int16 range before casting to avoid wraparound.
        y_val = np.clip(
            np.rint(x / y_scale_elementwise), a_min=-32768, a_max=32767
        ).astype(np.int16)
        y = make_tensor(
            "y",
            TensorProto.INT16,
            x.shape,
            y_val,
        )
        expect(
            node,
            inputs=[x, y_scale],
            outputs=[y],
            name="test_quantizelinear_blocked_symmetric",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/rangeop.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/rangeop.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2c0b8fc29b91235d67f44708617927db26e1189
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/rangeop.py
@@ -0,0 +1,56 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Range(Base):
+    @staticmethod
+    def export_range_float_type_positive_delta() -> None:
+        node = onnx.helper.make_node(
+            "Range",
+            inputs=["start", "limit", "delta"],
+            outputs=["output"],
+        )
+
+        start = np.float32(1)
+        limit = np.float32(5)
+        delta = np.float32(2)
+
+        output = np.arange(
+            start, limit, delta, dtype=np.float32
+        )  # expected output [1.0, 3.0]
+        expect(
+            node,
+            inputs=[start, limit, delta],
+            outputs=[output],
+            name="test_range_float_type_positive_delta",
+        )
+
+    @staticmethod
+    def export_range_int32_type_negative_delta() -> None:
+        node = onnx.helper.make_node(
+            "Range",
+            inputs=["start", "limit", "delta"],
+            outputs=["output"],
+        )
+
+        start = np.int32(10)
+        limit = np.int32(6)
+        delta = np.int32(-3)
+
+        output = np.arange(
+            start, limit, delta, dtype=np.int32
+        )  # expected output [10, 7]
+        expect(
+            node,
+            inputs=[start, limit, delta],
+            outputs=[output],
+            name="test_range_int32_type_negative_delta",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reciprocal.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reciprocal.py
new file mode 100644
index 0000000000000000000000000000000000000000..3322e070fe528e26a02c0ddf0c64131fadbbdb3a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reciprocal.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Reciprocal(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Reciprocal",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.array([-4, 2]).astype(np.float32)
+        y = np.reciprocal(x)  # expected output [-0.25, 0.5],
+        expect(node, inputs=[x], outputs=[y], name="test_reciprocal_example")
+
+        x = np.random.rand(3, 4, 5).astype(np.float32) + 0.5
+        y = np.reciprocal(x)
+        expect(node, inputs=[x], outputs=[y], name="test_reciprocal")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reduce_log_sum.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reduce_log_sum.py
new file mode 100644
index 0000000000000000000000000000000000000000..aee869f213c6ebe8ff4dfe0188f999cec954f681
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reduce_log_sum.py
@@ -0,0 +1,104 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class ReduceLogSum(Base):
+    @staticmethod
+    def export_nokeepdims() -> None:
+        shape = [3, 4, 5]
+        axes = np.array([2, 1], dtype=np.int64)
+
+        node = onnx.helper.make_node(
+            "ReduceLogSum",
+            inputs=["data", "axes"],
+            outputs=["reduced"],
+            keepdims=0,
+        )
+        data = np.random.ranf(shape).astype(np.float32)
+        reduced = np.log(np.sum(data, axis=tuple(axes), keepdims=False))
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_log_sum_desc_axes",
+        )
+
+        axes = np.array([0, 1], dtype=np.int64)
+        node = onnx.helper.make_node(
+            "ReduceLogSum",
+            inputs=["data", "axes"],
+            outputs=["reduced"],
+            keepdims=0,
+        )
+        data = np.random.ranf(shape).astype(np.float32)
+        reduced = np.log(np.sum(data, axis=tuple(axes), keepdims=False))
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_log_sum_asc_axes",
+        )
+
+    @staticmethod
+    def export_keepdims() -> None:
+        node = onnx.helper.make_node(
+            "ReduceLogSum", inputs=["data", "axes"], outputs=["reduced"]
+        )
+        data = np.random.ranf([3, 4, 5]).astype(np.float32)
+        reduced = np.log(np.sum(data, keepdims=True))
+        axes = np.array([], dtype=np.int64)
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_log_sum_default",
+        )
+
+    @staticmethod
+    def export_negative_axes_keepdims() -> None:
+        axes = np.array([-2], dtype=np.int64)
+        node = onnx.helper.make_node(
+            "ReduceLogSum", inputs=["data", "axes"], outputs=["reduced"]
+        )
+        data = np.random.ranf([3, 4, 5]).astype(np.float32)
+        reduced = np.log(np.sum(data, axis=tuple(axes), keepdims=True))
+        # print(reduced)
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_log_sum_negative_axes",
+        )
+
+    @staticmethod
+    def export_empty_set() -> None:
+        shape = [2, 0, 4]
+        keepdims = 1
+        reduced_shape = [2, 1, 4]
+
+        node = onnx.helper.make_node(
+            "ReduceLogSum",
+            inputs=["data", "axes"],
+            outputs=["reduced"],
+            keepdims=keepdims,
+        )
+
+        data = np.array([], dtype=np.float32).reshape(shape)
+        axes = np.array([1], dtype=np.int64)
+        zero = np.array(np.zeros(reduced_shape, dtype=np.float32))
+        reduced = np.log(zero)  # -inf
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_log_sum_empty_set",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reduce_log_sum_exp.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reduce_log_sum_exp.py
new file mode 100644
index 0000000000000000000000000000000000000000..50b7dbb7080723ab38bdfb0f6c24f1edec8d1591
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reduce_log_sum_exp.py
@@ -0,0 +1,193 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class ReduceLogSumExp(Base):
+    @staticmethod
+    def export_do_not_keepdims() -> None:
+        shape = [3, 2, 2]
+        axes = np.array([1], dtype=np.int64)
+        keepdims = 0
+        node = onnx.helper.make_node(
+            "ReduceLogSumExp",
+            inputs=["data", "axes"],
+            outputs=["reduced"],
+            keepdims=keepdims,
+        )
+
+        data = np.array(
+            [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double
+        )
+        reduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))
+        # print(reduced)
+        # [[20., 2.31326175]
+        # [40.00004578, 2.31326175]
+        # [60.00671387, 2.31326175]]
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_log_sum_exp_do_not_keepdims_example",
+        )
+
+        np.random.seed(0)
+        data = np.random.uniform(-10, 10, shape).astype(np.double)
+        reduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_log_sum_exp_do_not_keepdims_random",
+        )
+
+    @staticmethod
+    def export_keepdims() -> None:
+        shape = [3, 2, 2]
+        axes = np.array([1], dtype=np.int64)
+        keepdims = 1
+        node = onnx.helper.make_node(
+            "ReduceLogSumExp",
+            inputs=["data", "axes"],
+            outputs=["reduced"],
+            keepdims=keepdims,
+        )
+
+        data = np.array(
+            [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double
+        )
+        reduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))
+        # print(reduced)
+        # [[[20., 2.31326175]]
+        # [[40.00004578, 2.31326175]]
+        # [[60.00671387, 2.31326175]]]
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_log_sum_exp_keepdims_example",
+        )
+
+        np.random.seed(0)
+        data = np.random.uniform(-10, 10, shape).astype(np.double)
+        reduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_log_sum_exp_keepdims_random",
+        )
+
+    @staticmethod
+    def export_default_axes_keepdims() -> None:
+        shape = [3, 2, 2]
+        axes = np.array([], dtype=np.int64)
+        keepdims = 1
+
+        node = onnx.helper.make_node(
+            "ReduceLogSumExp",
+            inputs=["data", "axes"],
+            outputs=["reduced"],
+            keepdims=keepdims,
+        )
+
+        data = np.array(
+            [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double
+        )
+        reduced = np.log(np.sum(np.exp(data), axis=None, keepdims=keepdims == 1))
+        # print(reduced)
+        # [[[60.00671387]]]
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_log_sum_exp_default_axes_keepdims_example",
+        )
+
+        np.random.seed(0)
+        data = np.random.uniform(-10, 10, shape).astype(np.double)
+        reduced = np.log(np.sum(np.exp(data), axis=None, keepdims=keepdims == 1))
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_log_sum_exp_default_axes_keepdims_random",
+        )
+
+    @staticmethod
+    def export_negative_axes_keepdims() -> None:
+        shape = [3, 2, 2]
+        axes = np.array([-2], dtype=np.int64)
+        keepdims = 1
+        node = onnx.helper.make_node(
+            "ReduceLogSumExp",
+            inputs=["data", "axes"],
+            outputs=["reduced"],
+            keepdims=keepdims,
+        )
+
+        data = np.array(
+            [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.double
+        )
+        reduced = np.log(np.sum(np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))
+        # print(reduced)
+        # [[[20., 2.31326175]]
+        # [[40.00004578, 2.31326175]]
+        # [[60.00671387, 2.31326175]]]
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_log_sum_exp_negative_axes_keepdims_example",
+        )
+
+        np.random.seed(0)
+        data = np.random.uniform(-10, 10, shape).astype(np.double)
+        reduced = np.log(
+            np.sum(np.exp(data), axis=tuple(axes.tolist()), keepdims=keepdims == 1)
+        )
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_log_sum_exp_negative_axes_keepdims_random",
+        )
+
+    @staticmethod
+    def export_empty_set() -> None:
+        shape = [2, 0, 4]
+        keepdims = 1
+        reduced_shape = [2, 1, 4]
+
+        node = onnx.helper.make_node(
+            "ReduceLogSumExp",
+            inputs=["data", "axes"],
+            outputs=["reduced"],
+            keepdims=keepdims,
+        )
+
+        data = np.array([], dtype=np.float32).reshape(shape)
+        axes = np.array([1], dtype=np.int64)
+        zero = np.array(np.zeros(reduced_shape, dtype=np.float32))
+        reduced = np.log(zero)  # -inf
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_log_sum_exp_empty_set",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reducel1.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reducel1.py
new file mode 100644
index 0000000000000000000000000000000000000000..51fc52627f2f3371fb7d5857927bc72979628cf6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reducel1.py
@@ -0,0 +1,189 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class ReduceL1(Base):
+    @staticmethod
+    def export_do_not_keepdims() -> None:
+        shape = [3, 2, 2]
+        axes = np.array([2], dtype=np.int64)
+        keepdims = 0
+
+        node = onnx.helper.make_node(
+            "ReduceL1",
+            inputs=["data", "axes"],
+            outputs=["reduced"],
+            keepdims=keepdims,
+        )
+
+        data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
+        # print(data)
+        # [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]
+
+        reduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)
+        # print(reduced)
+        # [[3., 7.], [11., 15.], [19., 23.]]
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l1_do_not_keepdims_example",
+        )
+
+        np.random.seed(0)
+        data = np.random.uniform(-10, 10, shape).astype(np.float32)
+        reduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l1_do_not_keepdims_random",
+        )
+
+    @staticmethod
+    def export_keepdims() -> None:
+        shape = [3, 2, 2]
+        axes = np.array([2], dtype=np.int64)
+        keepdims = 1
+
+        node = onnx.helper.make_node(
+            "ReduceL1",
+            inputs=["data", "axes"],
+            outputs=["reduced"],
+            keepdims=keepdims,
+        )
+
+        data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
+        # print(data)
+        # [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]
+
+        reduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)
+        # print(reduced)
+        # [[[3.], [7.]], [[11.], [15.]], [[19.], [23.]]]
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l1_keep_dims_example",
+        )
+
+        np.random.seed(0)
+        data = np.random.uniform(-10, 10, shape).astype(np.float32)
+        reduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l1_keep_dims_random",
+        )
+
+    @staticmethod
+    def export_default_axes_keepdims() -> None:
+        shape = [3, 2, 2]
+        axes = np.array([], dtype=np.int64)
+        keepdims = 1
+
+        node = onnx.helper.make_node(
+            "ReduceL1", inputs=["data", "axes"], outputs=["reduced"], keepdims=keepdims
+        )
+
+        data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
+        # print(data)
+        # [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]
+
+        reduced = np.sum(a=np.abs(data), axis=None, keepdims=keepdims == 1)
+        # print(reduced)
+        # [[[78.]]]
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l1_default_axes_keepdims_example",
+        )
+
+        np.random.seed(0)
+        data = np.random.uniform(-10, 10, shape).astype(np.float32)
+        reduced = np.sum(a=np.abs(data), axis=None, keepdims=keepdims == 1)
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l1_default_axes_keepdims_random",
+        )
+
+    @staticmethod
+    def export_negative_axes_keepdims() -> None:
+        shape = [3, 2, 2]
+        axes = np.array([-1], dtype=np.int64)
+        keepdims = 1
+
+        node = onnx.helper.make_node(
+            "ReduceL1",
+            inputs=["data", "axes"],
+            outputs=["reduced"],
+            keepdims=keepdims,
+        )
+
+        data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
+        # print(data)
+        # [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]
+
+        reduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)
+        # print(reduced)
+        # [[[3.], [7.]], [[11.], [15.]], [[19.], [23.]]]
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l1_negative_axes_keep_dims_example",
+        )
+
+        np.random.seed(0)
+        data = np.random.uniform(-10, 10, shape).astype(np.float32)
+        reduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l1_negative_axes_keep_dims_random",
+        )
+
+    @staticmethod
+    def export_empty_set() -> None:
+        shape = [2, 0, 4]
+        keepdims = 1
+        reduced_shape = [2, 1, 4]
+
+        node = onnx.helper.make_node(
+            "ReduceL1",
+            inputs=["data", "axes"],
+            outputs=["reduced"],
+            keepdims=keepdims,
+        )
+
+        data = np.array([], dtype=np.float32).reshape(shape)
+        axes = np.array([1], dtype=np.int64)
+        reduced = np.array(np.zeros(reduced_shape, dtype=np.float32))
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l1_empty_set",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reducel2.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reducel2.py
new file mode 100644
index 0000000000000000000000000000000000000000..6eaa6359c5e999d3c7184f050dc0dc54ea73f2c2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reducel2.py
@@ -0,0 +1,207 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class ReduceL2(Base):
+    @staticmethod
+    def export_do_not_keepdims() -> None:
+        shape = [3, 2, 2]
+        axes = np.array([2], dtype=np.int64)
+        keepdims = 0
+
+        node = onnx.helper.make_node(
+            "ReduceL2",
+            inputs=["data", "axes"],
+            outputs=["reduced"],
+            keepdims=keepdims,
+        )
+
+        data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
+        # print(data)
+        # [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]
+
+        reduced = np.sqrt(
+            np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)
+        )
+        # print(reduced)
+        # [[2.23606798, 5.],
+        # [7.81024968, 10.63014581],
+        # [13.45362405, 16.2788206]]
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l2_do_not_keepdims_example",
+        )
+
+        np.random.seed(0)
+        data = np.random.uniform(-10, 10, shape).astype(np.float32)
+        reduced = np.sqrt(
+            np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)
+        )
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l2_do_not_keepdims_random",
+        )
+
+    @staticmethod
+    def export_keepdims() -> None:
+        shape = [3, 2, 2]
+        axes = np.array([2], dtype=np.int64)
+        keepdims = 1
+
+        node = onnx.helper.make_node(
+            "ReduceL2",
+            inputs=["data", "axes"],
+            outputs=["reduced"],
+            keepdims=keepdims,
+        )
+
+        data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
+        # print(data)
+        # [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]
+
+        reduced = np.sqrt(
+            np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)
+        )
+        # print(reduced)
+        # [[[2.23606798], [5.]]
+        # [[7.81024968], [10.63014581]]
+        # [[13.45362405], [16.2788206 ]]]
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l2_keep_dims_example",
+        )
+
+        np.random.seed(0)
+        data = np.random.uniform(-10, 10, shape).astype(np.float32)
+        reduced = np.sqrt(
+            np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)
+        )
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l2_keep_dims_random",
+        )
+
+    @staticmethod
+    def export_default_axes_keepdims() -> None:
+        shape = [3, 2, 2]
+        axes = np.array([], dtype=np.int64)
+        keepdims = 1
+
+        node = onnx.helper.make_node(
+            "ReduceL2", inputs=["data", "axes"], outputs=["reduced"], keepdims=keepdims
+        )
+
+        data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
+        # print(data)
+        # [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]
+
+        reduced = np.sqrt(np.sum(a=np.square(data), axis=None, keepdims=keepdims == 1))
+        # print(reduced)
+        # [[[25.49509757]]]
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l2_default_axes_keepdims_example",
+        )
+
+        np.random.seed(0)
+        data = np.random.uniform(-10, 10, shape).astype(np.float32)
+        reduced = np.sqrt(np.sum(a=np.square(data), axis=None, keepdims=keepdims == 1))
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l2_default_axes_keepdims_random",
+        )
+
+    @staticmethod
+    def export_negative_axes_keepdims() -> None:
+        shape = [3, 2, 2]
+        axes = np.array([-1], dtype=np.int64)
+        keepdims = 1
+
+        node = onnx.helper.make_node(
+            "ReduceL2",
+            inputs=["data", "axes"],
+            outputs=["reduced"],
+            keepdims=keepdims,
+        )
+
+        data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
+        # print(data)
+        # [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]
+
+        reduced = np.sqrt(
+            np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)
+        )
+        # print(reduced)
+        # [[[2.23606798], [5.]]
+        # [[7.81024968], [10.63014581]]
+        # [[13.45362405], [16.2788206 ]]]
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l2_negative_axes_keep_dims_example",
+        )
+
+        np.random.seed(0)
+        data = np.random.uniform(-10, 10, shape).astype(np.float32)
+        reduced = np.sqrt(
+            np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1)
+        )
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l2_negative_axes_keep_dims_random",
+        )
+
+    @staticmethod
+    def export_empty_set() -> None:
+        shape = [2, 0, 4]
+        keepdims = 1
+        reduced_shape = [2, 1, 4]
+
+        node = onnx.helper.make_node(
+            "ReduceL2",
+            inputs=["data", "axes"],
+            outputs=["reduced"],
+            keepdims=keepdims,
+        )
+
+        data = np.array([], dtype=np.float32).reshape(shape)
+        axes = np.array([1], dtype=np.int64)
+        reduced = np.array(np.zeros(reduced_shape, dtype=np.float32))
+
+        expect(
+            node,
+            inputs=[data, axes],
+            outputs=[reduced],
+            name="test_reduce_l2_empty_set",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/regex_full_match.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/regex_full_match.py
new file mode 100644
index 0000000000000000000000000000000000000000..74619205481da741ff840b6fedfc5b063513ec11
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/regex_full_match.py
@@ -0,0 +1,68 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class RegexFullMatch(Base):
+    @staticmethod
+    def export_basic() -> None:
+        node = onnx.helper.make_node(
+            "RegexFullMatch",
+            inputs=["X"],
+            outputs=["Y"],
+            pattern=r"www\.[\w.-]+\.\bcom\b",
+        )
+
+        x = np.array(["www.google.com", "www.facebook.com", "www.bbc.co.uk"]).astype(
+            object
+        )
+        result = np.array([True, True, False])
+        expect(node, inputs=[x], outputs=[result], name="test_regex_full_match_basic")
+
+    @staticmethod
+    def export_match_email_domain() -> None:
+        node = onnx.helper.make_node(
+            "RegexFullMatch",
+            inputs=["X"],
+            outputs=["Y"],
+            pattern=r"(\W|^)[\w.\-]{0,25}@(yahoo|gmail)\.com(\W|$)",
+        )
+
+        x = np.array(
+            [
+                ["account@gmail.com", "account@hotmail.com"],
+                ["not email", "account2@yahoo.com"],
+            ]
+        ).astype(object)
+        result = np.array([[True, False], [False, True]])
+        expect(
+            node,
+            inputs=[x],
+            outputs=[result],
+            name="test_regex_full_match_email_domain",
+        )
+
+    @staticmethod
+    def export_match_empty() -> None:
+        node = onnx.helper.make_node(
+            "RegexFullMatch",
+            inputs=["X"],
+            outputs=["Y"],
+            pattern=r"(\W|^)[\w.\-]{0,25}@(yahoo|gmail)\.com(\W|$)",
+        )
+
+        x = np.array([[], []]).astype(object)
+        result = np.array([[], []]).astype(bool)
+        expect(
+            node,
+            inputs=[x],
+            outputs=[result],
+            name="test_regex_full_match_empty",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/relu.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/relu.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e8b1ea7f38eddc02c03634e0b0cddf15fc81e12
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/relu.py
@@ -0,0 +1,24 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Relu(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Relu",
+            inputs=["x"],
+            outputs=["y"],
+        )
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = np.clip(x, 0, np.inf)
+
+        expect(node, inputs=[x], outputs=[y], name="test_relu")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reshape.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reshape.py
new file mode 100644
index 0000000000000000000000000000000000000000..3eb06d1310ec4af05a9d8faeabd9b5454429c29f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reshape.py
@@ -0,0 +1,83 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def reshape_reference_implementation(
+    data: np.ndarray, shape: np.ndarray, allowzero: int = 0
+) -> np.ndarray:
+    # replace zeros with corresponding dim size
+    # we need to do this because np.reshape doesn't support 0 by default unless 'allowzero' is set
+    new_shape = np.copy(shape)
+    if allowzero == 0:
+        zeros_index = np.where(shape == 0)
+        new_shape[zeros_index] = np.array(data.shape)[zeros_index]
+    reshaped = np.reshape(data, new_shape)
+    return reshaped
+
+
+class Reshape(Base):
+    @staticmethod
+    def export_reshape() -> None:
+        original_shape = [2, 3, 4]
+        test_cases = {
+            "reordered_all_dims": np.array([4, 2, 3], dtype=np.int64),
+            "reordered_last_dims": np.array([2, 4, 3], dtype=np.int64),
+            "reduced_dims": np.array([2, 12], dtype=np.int64),
+            "extended_dims": np.array([2, 3, 2, 2], dtype=np.int64),
+            "one_dim": np.array([24], dtype=np.int64),
+            "negative_dim": np.array([2, -1, 2], dtype=np.int64),
+            "negative_extended_dims": np.array([-1, 2, 3, 4], dtype=np.int64),
+            "zero_dim": np.array([2, 0, 4, 1], dtype=np.int64),
+            "zero_and_negative_dim": np.array([2, 0, 1, -1], dtype=np.int64),
+        }
+        data = np.random.random_sample(original_shape).astype(np.float32)
+
+        for test_name, shape in test_cases.items():
+            node = onnx.helper.make_node(
+                "Reshape",
+                inputs=["data", "shape"],
+                outputs=["reshaped"],
+            )
+
+            reshaped = reshape_reference_implementation(data, shape)
+
+            expect(
+                node,
+                inputs=[data, shape],
+                outputs=[reshaped],
+                name="test_reshape_" + test_name,
+            )
+
+    @staticmethod
+    def export_allowzero() -> None:
+        original_shape = [0, 3, 4]
+        test_cases = {
+            "allowzero_reordered": np.array([3, 4, 0], dtype=np.int64),
+        }
+        data = np.random.random_sample(original_shape).astype(np.float32)
+
+        for test_name, shape in test_cases.items():
+            node = onnx.helper.make_node(
+                "Reshape",
+                inputs=["data", "shape"],
+                outputs=["reshaped"],
+                allowzero=1,  # if allowzero=1, final shape = (3, 4, 0)
+                # if allowzero=0, final shape = (3, 4, 4)
+            )
+
+            reshaped = reshape_reference_implementation(data, shape, allowzero=1)
+
+            expect(
+                node,
+                inputs=[data, shape],
+                outputs=[reshaped],
+                name="test_reshape_" + test_name,
+            )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/resize.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/resize.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb736d71423dc01a3bf65d355e6dd0bff1ef7dcd
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/resize.py
@@ -0,0 +1,1714 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.reference.ops.op_resize import _cubic_coeffs as cubic_coeffs
+from onnx.reference.ops.op_resize import (
+    _cubic_coeffs_antialias as cubic_coeffs_antialias,
+)
+from onnx.reference.ops.op_resize import _interpolate_nd as interpolate_nd
+from onnx.reference.ops.op_resize import _linear_coeffs as linear_coeffs
+from onnx.reference.ops.op_resize import (
+    _linear_coeffs_antialias as linear_coeffs_antialias,
+)
+from onnx.reference.ops.op_resize import _nearest_coeffs as nearest_coeffs
+
+
+class Resize(Base):
+    @staticmethod
+    def export_resize_upsample_scales_nearest() -> None:
+        """Nearest-mode upsample of a 1x1x2x2 input by scales (1, 1, 2, 3)."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="nearest",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2],
+                        [3, 4],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        scales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)
+
+        # Expected output:
+        # [[[[1. 1. 1. 2. 2. 2.]
+        #    [1. 1. 1. 2. 2. 2.]
+        #    [3. 3. 3. 4. 4. 4.]
+        #    [3. 3. 3. 4. 4. 4.]]]]
+        output = interpolate_nd(
+            data, lambda x, _: nearest_coeffs(x), scale_factors=scales
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_upsample_scales_nearest",
+        )
+
+    @staticmethod
+    def export_resize_downsample_scales_nearest() -> None:
+        """Nearest-mode downsample of a 1x1x2x4 input by scales (1, 1, 0.6, 0.6)."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="nearest",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
+
+        # Expected output: [[[[1. 3.]]]]
+        output = interpolate_nd(
+            data, lambda x, _: nearest_coeffs(x), scale_factors=scales
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_downsample_scales_nearest",
+        )
+
+    @staticmethod
+    def export_resize_upsample_sizes_nearest() -> None:
+        """Nearest-mode upsample of a 1x1x2x2 input to explicit sizes (1, 1, 7, 8)."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "", "sizes"],
+            outputs=["Y"],
+            mode="nearest",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2],
+                        [3, 4],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        sizes = np.array([1, 1, 7, 8], dtype=np.int64)
+
+        # Expected output:
+        # [[[[1. 1. 1. 1. 2. 2. 2. 2.]
+        #    [1. 1. 1. 1. 2. 2. 2. 2.]
+        #    [1. 1. 1. 1. 2. 2. 2. 2.]
+        #    [1. 1. 1. 1. 2. 2. 2. 2.]
+        #    [3. 3. 3. 3. 4. 4. 4. 4.]
+        #    [3. 3. 3. 3. 4. 4. 4. 4.]
+        #    [3. 3. 3. 3. 4. 4. 4. 4.]]]]
+        output = interpolate_nd(
+            data, lambda x, _: nearest_coeffs(x), output_size=sizes
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, sizes],
+            outputs=[output],
+            name="test_resize_upsample_sizes_nearest",
+        )
+
+    @staticmethod
+    def export_resize_downsample_sizes_nearest() -> None:
+        """Nearest-mode downsample of a 1x1x2x4 input to explicit sizes (1, 1, 1, 3)."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "", "sizes"],
+            outputs=["Y"],
+            mode="nearest",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        sizes = np.array([1, 1, 1, 3], dtype=np.int64)
+
+        # Expected output: [[[[1. 2. 4.]]]]
+        output = interpolate_nd(
+            data, lambda x, _: nearest_coeffs(x), output_size=sizes
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, sizes],
+            outputs=[output],
+            name="test_resize_downsample_sizes_nearest",
+        )
+
+    @staticmethod
+    def export_resize_upsample_scales_linear() -> None:
+        """Linear-mode upsample of a 1x1x2x2 input by scales (1, 1, 2, 2)."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="linear",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2],
+                        [3, 4],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
+
+        # Expected output:
+        # [[[[1.   1.25 1.75 2.  ]
+        #    [1.5  1.75 2.25 2.5 ]
+        #    [2.5  2.75 3.25 3.5 ]
+        #    [3.   3.25 3.75 4.  ]]]]
+        output = interpolate_nd(
+            data, lambda x, _: linear_coeffs(x), scale_factors=scales
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_upsample_scales_linear",
+        )
+
+    @staticmethod
+    def export_resize_upsample_scales_linear_align_corners() -> None:
+        """Linear-mode 2x upsample with the align_corners coordinate transform."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="linear",
+            coordinate_transformation_mode="align_corners",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2],
+                        [3, 4],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
+
+        # Expected output:
+        # [[[[1.         1.33333333 1.66666667 2.        ]
+        #    [1.66666667 2.         2.33333333 2.66666667]
+        #    [2.33333333 2.66666667 3.         3.33333333]
+        #    [3.         3.33333333 3.66666667 4.        ]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: linear_coeffs(x),
+            scale_factors=scales,
+            coordinate_transformation_mode="align_corners",
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_upsample_scales_linear_align_corners",
+        )
+
+    @staticmethod
+    def export_resize_downsample_scales_linear() -> None:
+        """Linear-mode downsample of a 1x1x2x4 input by scales (1, 1, 0.6, 0.6)."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="linear",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
+
+        # Expected output: [[[[2.6666665 4.3333331]]]]
+        output = interpolate_nd(
+            data, lambda x, _: linear_coeffs(x), scale_factors=scales
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_downsample_scales_linear",
+        )
+
+    @staticmethod
+    def export_resize_downsample_scales_linear_align_corners() -> None:
+        """Linear-mode downsample (scales 0.6) with align_corners coordinates."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="linear",
+            coordinate_transformation_mode="align_corners",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
+
+        # Expected output: [[[[1.       3.142857]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: linear_coeffs(x),
+            scale_factors=scales,
+            coordinate_transformation_mode="align_corners",
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_downsample_scales_linear_align_corners",
+        )
+
+    @staticmethod
+    def export_resize_upsample_scales_cubic() -> None:
+        """Cubic-mode 2x upsample of a 1x1x4x4 input (default coeff A)."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="cubic",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
+
+        # Expected output:
+        # [[[[ 0.47265625  0.76953125  1.24609375  1.875       2.28125
+        #      2.91015625  3.38671875  3.68359375]
+        #    [ 1.66015625  1.95703125  2.43359375  3.0625      3.46875
+        #      4.09765625  4.57421875  4.87109375]
+        #    [ 3.56640625  3.86328125  4.33984375  4.96875     5.375
+        #      6.00390625  6.48046875  6.77734375]
+        #    [ 6.08203125  6.37890625  6.85546875  7.484375    7.890625
+        #      8.51953125  8.99609375  9.29296875]
+        #    [ 7.70703125  8.00390625  8.48046875  9.109375    9.515625
+        #     10.14453125 10.62109375 10.91796875]
+        #    [10.22265625 10.51953125 10.99609375 11.625      12.03125
+        #     12.66015625 13.13671875 13.43359375]
+        #    [12.12890625 12.42578125 12.90234375 13.53125    13.9375
+        #     14.56640625 15.04296875 15.33984375]
+        #    [13.31640625 13.61328125 14.08984375 14.71875    15.125
+        #     15.75390625 16.23046875 16.52734375]]]]
+        output = interpolate_nd(
+            data, lambda x, _: cubic_coeffs(x), scale_factors=scales
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_upsample_scales_cubic",
+        )
+
+    @staticmethod
+    def export_resize_upsample_scales_cubic_align_corners() -> None:
+        """Cubic-mode 2x upsample with align_corners coordinates."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="cubic",
+            coordinate_transformation_mode="align_corners",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
+
+        # Expected output:
+        # [[[[ 1.          1.34110787  1.80029155  2.32944606  2.67055394
+        #      3.19970845  3.65889213  4.        ]
+        #    [ 2.36443149  2.70553936  3.16472303  3.69387755  4.03498542
+        #      4.56413994  5.02332362  5.36443149]
+        #    [ 4.20116618  4.54227405  5.00145773  5.53061224  5.87172012
+        #      6.40087464  6.86005831  7.20116618]
+        #    [ 6.31778426  6.65889213  7.1180758   7.64723032  7.98833819
+        #      8.51749271  8.97667638  9.31778426]
+        #    [ 7.68221574  8.02332362  8.48250729  9.01166181  9.35276968
+        #      9.8819242  10.34110787 10.68221574]
+        #    [ 9.79883382 10.13994169 10.59912536 11.12827988 11.46938776
+        #     11.99854227 12.45772595 12.79883382]
+        #    [11.63556851 11.97667638 12.43586006 12.96501458 13.30612245
+        #     13.83527697 14.29446064 14.63556851]
+        #    [13.         13.34110787 13.80029155 14.32944606 14.67055394
+        #     15.19970845 15.65889213 16.        ]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: cubic_coeffs(x),
+            scale_factors=scales,
+            coordinate_transformation_mode="align_corners",
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_upsample_scales_cubic_align_corners",
+        )
+
+    @staticmethod
+    def export_resize_downsample_scales_cubic() -> None:
+        """Cubic-mode downsample of a 1x1x4x4 input by scales (1, 1, 0.8, 0.8)."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="cubic",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        scales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)
+
+        # Expected output:
+        # [[[[ 1.47119141  2.78125     4.08251953]
+        #    [ 6.71142578  8.02148438  9.32275391]
+        #    [11.91650391 13.2265625  14.52783203]]]]
+        output = interpolate_nd(
+            data, lambda x, _: cubic_coeffs(x), scale_factors=scales
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_downsample_scales_cubic",
+        )
+
+    @staticmethod
+    def export_resize_downsample_scales_cubic_align_corners() -> None:
+        """Cubic-mode downsample (scales 0.8) with align_corners coordinates."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="cubic",
+            coordinate_transformation_mode="align_corners",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        scales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)
+
+        # Expected output:
+        # [[[[ 1.          2.39519159  3.79038317]
+        #    [ 6.58076634  7.97595793  9.37114951]
+        #    [12.16153268 13.55672427 14.95191585]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: cubic_coeffs(x),
+            scale_factors=scales,
+            coordinate_transformation_mode="align_corners",
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_downsample_scales_cubic_align_corners",
+        )
+
+    @staticmethod
+    def export_resize_upsample_sizes_cubic() -> None:
+        """Cubic-mode upsample of a 1x1x4x4 input to explicit sizes (1, 1, 9, 10)."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "", "sizes"],
+            outputs=["Y"],
+            mode="cubic",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        sizes = np.array([1, 1, 9, 10], dtype=np.int64)
+
+        # Expected output:
+        # [[[[ 0.45507922  0.64057922  0.97157922  1.42257922  1.90732922
+        #      2.22332922  2.70807922  3.15907922  3.49007922  3.67557922]
+        #    [ 1.39437963  1.57987963  1.91087963  2.36187963  2.84662963
+        #      3.16262963  3.64737963  4.09837963  4.42937963  4.61487963]
+        #    [ 2.95130693  3.13680693  3.46780693  3.91880693  4.40355693
+        #      4.71955693  5.20430693  5.65530693  5.98630693  6.17180693]
+        #    [ 5.20525069  5.39075069  5.72175069  6.17275069  6.65750069
+        #      6.97350069  7.45825069  7.90925069  8.24025069  8.42575069]
+        #    [ 6.88975     7.07525     7.40625     7.85725     8.342
+        #      8.658       9.14275     9.59375     9.92475    10.11025   ]
+        #    [ 8.57424931  8.75974931  9.09074931  9.54174931 10.02649931
+        #     10.34249931 10.82724931 11.27824931 11.60924931 11.79474931]
+        #    [10.82819307 11.01369307 11.34469307 11.79569307 12.28044307
+        #     12.59644307 13.08119307 13.53219307 13.86319307 14.04869307]
+        #    [12.38512037 12.57062037 12.90162037 13.35262037 13.83737037
+        #     14.15337037 14.63812037 15.08912037 15.42012037 15.60562037]
+        #    [13.32442078 13.50992078 13.84092078 14.29192078 14.77667078
+        #     15.09267078 15.57742078 16.02842078 16.35942078 16.54492078]]]]
+        output = interpolate_nd(
+            data, lambda x, _: cubic_coeffs(x), output_size=sizes
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, sizes],
+            outputs=[output],
+            name="test_resize_upsample_sizes_cubic",
+        )
+
+    @staticmethod
+    def export_resize_downsample_sizes_cubic() -> None:
+        """Cubic-mode downsample of a 1x1x4x4 input to explicit sizes (1, 1, 3, 3)."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "", "sizes"],
+            outputs=["Y"],
+            mode="cubic",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        sizes = np.array([1, 1, 3, 3], dtype=np.int64)
+
+        # Expected output:
+        # [[[[ 1.63078704  3.00462963  4.37847222]
+        #    [ 7.12615741  8.5         9.87384259]
+        #    [12.62152778 13.99537037 15.36921296]]]]
+        output = interpolate_nd(
+            data, lambda x, _: cubic_coeffs(x), output_size=sizes
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, sizes],
+            outputs=[output],
+            name="test_resize_downsample_sizes_cubic",
+        )
+
+    # TensorFlow v1 bicubic with half_pixel_centers=True
+    @staticmethod
+    def export_resize_upsample_scales_cubic_A_n0p5_exclude_outside() -> None:
+        """Cubic-mode 2x upsample with cubic_coeff_a=-0.5 and exclude_outside=True."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="cubic",
+            cubic_coeff_a=-0.5,
+            exclude_outside=True,
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
+
+        # Expected output:
+        # [[[[ 0.55882353  0.81494204  1.35698249  1.89705882  2.39705882
+        #      2.93713516  3.47917561  3.73529412]
+        #    [ 1.58329755  1.83941606  2.38145651  2.92153285  3.42153285
+        #      3.96160918  4.50364964  4.75976814]
+        #    [ 3.75145936  4.00757787  4.54961832  5.08969466  5.58969466
+        #      6.12977099  6.67181144  6.92792995]
+        #    [ 5.91176471  6.16788321  6.70992366  7.25        7.75
+        #      8.29007634  8.83211679  9.08823529]
+        #    [ 7.91176471  8.16788321  8.70992366  9.25        9.75
+        #     10.29007634 10.83211679 11.08823529]
+        #    [10.07207005 10.32818856 10.87022901 11.41030534 11.91030534
+        #     12.45038168 12.99242213 13.24854064]
+        #    [12.24023186 12.49635036 13.03839082 13.57846715 14.07846715
+        #     14.61854349 15.16058394 15.41670245]
+        #    [13.26470588 13.52082439 14.06286484 14.60294118 15.10294118
+        #     15.64301751 16.18505796 16.44117647]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: cubic_coeffs(x, A=-0.5),
+            scale_factors=scales,
+            exclude_outside=True,
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_upsample_scales_cubic_A_n0p5_exclude_outside",
+        )
+
+    @staticmethod
+    def export_resize_downsample_scales_cubic_A_n0p5_exclude_outside() -> None:
+        """Cubic-mode downsample (scales 0.8) with cubic_coeff_a=-0.5,
+        exclude_outside=True."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="cubic",
+            cubic_coeff_a=-0.5,
+            exclude_outside=True,
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        scales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)
+
+        # Expected output:
+        # [[[[ 1.36812675  2.6695014   4.0133367 ]
+        #    [ 6.57362535  7.875       9.2188353 ]
+        #    [11.94896657 13.25034122 14.59417652]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: cubic_coeffs(x, A=-0.5),
+            scale_factors=scales,
+            exclude_outside=True,
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_downsample_scales_cubic_A_n0p5_exclude_outside",
+        )
+
+    # TensorFlow v1 bicubic with half_pixel_centers=False
+    @staticmethod
+    def export_resize_upsample_scales_cubic_asymmetric() -> None:
+        """Cubic-mode 2x upsample with the asymmetric coordinate transform
+        (reference uses A=-0.75)."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="cubic",
+            coordinate_transformation_mode="asymmetric",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
+
+        # Expected output:
+        # [[[[ 1.       1.40625  2.       2.5      3.       3.59375  4.
+        #      4.09375]
+        #    [ 2.625    3.03125  3.625    4.125    4.625    5.21875  5.625
+        #      5.71875]
+        #    [ 5.       5.40625  6.       6.5      7.       7.59375  8.
+        #      8.09375]
+        #    [ 7.       7.40625  8.       8.5      9.       9.59375 10.
+        #     10.09375]
+        #    [ 9.       9.40625 10.      10.5     11.      11.59375 12.
+        #     12.09375]
+        #    [11.375   11.78125 12.375   12.875   13.375   13.96875 14.375
+        #     14.46875]
+        #    [13.      13.40625 14.      14.5     15.      15.59375 16.
+        #     16.09375]
+        #    [13.375   13.78125 14.375   14.875   15.375   15.96875 16.375
+        #     16.46875]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: cubic_coeffs(x, A=-0.75),
+            scale_factors=scales,
+            coordinate_transformation_mode="asymmetric",
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_upsample_scales_cubic_asymmetric",
+        )
+
+    @staticmethod
+    def export_resize_tf_crop_and_resize() -> None:
+        """Linear-mode Resize with tf_crop_and_resize: crop to the given roi,
+        then resize to explicit sizes (1, 1, 3, 3)."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "roi", "", "sizes"],
+            outputs=["Y"],
+            mode="linear",
+            coordinate_transformation_mode="tf_crop_and_resize",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        # Note: for some ROIs the result may differ from TensorFlow's because
+        # of floating-point inaccuracy
+        roi = np.array([0, 0, 0.4, 0.6, 1, 1, 0.6, 0.8], dtype=np.float32)
+        sizes = np.array([1, 1, 3, 3], dtype=np.int64)
+
+        # Expected output:
+        # [[[[ 7.6000004  7.9        8.2      ]
+        #    [ 8.8        9.1        9.400001 ]
+        #    [10.        10.3       10.6      ]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: linear_coeffs(x),
+            output_size=sizes,
+            roi=roi,
+            coordinate_transformation_mode="tf_crop_and_resize",
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, roi, sizes],
+            outputs=[output],
+            name="test_resize_tf_crop_and_resize",
+        )
+
+    @staticmethod
+    def export_resize_tf_crop_and_resize_extrapolation_value() -> None:
+        """tf_crop_and_resize with an roi extending past the input; out-of-range
+        samples take extrapolation_value=10.0."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "roi", "", "sizes"],
+            outputs=["Y"],
+            mode="linear",
+            coordinate_transformation_mode="tf_crop_and_resize",
+            extrapolation_value=10.0,
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        # Note: for some ROIs the result may differ from TensorFlow's because
+        # of floating-point inaccuracy
+        roi = np.array([0, 0, 0.4, 0.6, 1, 1, 1.2, 1.7], dtype=np.float32)
+        sizes = np.array([1, 1, 3, 3], dtype=np.int64)
+
+        # Expected output:
+        # [[[[ 7.6000004 10.        10.       ]
+        #    [12.400001  10.        10.       ]
+        #    [10.        10.        10.       ]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: linear_coeffs(x),
+            output_size=sizes,
+            roi=roi,
+            coordinate_transformation_mode="tf_crop_and_resize",
+            extrapolation_value=10.0,
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, roi, sizes],
+            outputs=[output],
+            name="test_resize_tf_crop_and_resize_extrapolation_value",
+        )
+
+    @staticmethod
+    def export_resize_downsample_sizes_linear_pytorch_half_pixel() -> None:
+        """Linear-mode downsample to sizes (1, 1, 3, 1) with pytorch_half_pixel
+        coordinates."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "", "sizes"],
+            outputs=["Y"],
+            mode="linear",
+            coordinate_transformation_mode="pytorch_half_pixel",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        sizes = np.array([1, 1, 3, 1], dtype=np.int64)
+
+        # Expected output:
+        # [[[[ 1.6666666]
+        #    [ 7.       ]
+        #    [12.333333 ]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: linear_coeffs(x),
+            output_size=sizes,
+            coordinate_transformation_mode="pytorch_half_pixel",
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, sizes],
+            outputs=[output],
+            name="test_resize_downsample_sizes_linear_pytorch_half_pixel",
+        )
+
+    @staticmethod
+    def export_resize_upsample_sizes_nearest_floor_align_corners() -> None:
+        """Nearest-mode upsample to sizes (1, 1, 8, 8) using nearest_mode="floor"
+        and align_corners coordinates."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "", "sizes"],
+            outputs=["Y"],
+            mode="nearest",
+            coordinate_transformation_mode="align_corners",
+            nearest_mode="floor",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        sizes = np.array([1, 1, 8, 8], dtype=np.int64)
+
+        # Expected output:
+        # [[[[ 1.  1.  1.  2.  2.  3.  3.  4.]
+        #    [ 1.  1.  1.  2.  2.  3.  3.  4.]
+        #    [ 1.  1.  1.  2.  2.  3.  3.  4.]
+        #    [ 5.  5.  5.  6.  6.  7.  7.  8.]
+        #    [ 5.  5.  5.  6.  6.  7.  7.  8.]
+        #    [ 9.  9.  9. 10. 10. 11. 11. 12.]
+        #    [ 9.  9.  9. 10. 10. 11. 11. 12.]
+        #    [13. 13. 13. 14. 14. 15. 15. 16.]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: nearest_coeffs(x, mode="floor"),
+            output_size=sizes,
+            coordinate_transformation_mode="align_corners",
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, sizes],
+            outputs=[output],
+            name="test_resize_upsample_sizes_nearest_floor_align_corners",
+        )
+
+    @staticmethod
+    def export_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric() -> None:
+        """Nearest-mode upsample to sizes (1, 1, 8, 8) using
+        nearest_mode="round_prefer_ceil" and asymmetric coordinates."""
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "", "sizes"],
+            outputs=["Y"],
+            mode="nearest",
+            coordinate_transformation_mode="asymmetric",
+            nearest_mode="round_prefer_ceil",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        sizes = np.array([1, 1, 8, 8], dtype=np.int64)
+
+        # Expected output:
+        # [[[[ 1.  2.  2.  3.  3.  4.  4.  4.]
+        #    [ 5.  6.  6.  7.  7.  8.  8.  8.]
+        #    [ 5.  6.  6.  7.  7.  8.  8.  8.]
+        #    [ 9. 10. 10. 11. 11. 12. 12. 12.]
+        #    [ 9. 10. 10. 11. 11. 12. 12. 12.]
+        #    [13. 14. 14. 15. 15. 16. 16. 16.]
+        #    [13. 14. 14. 15. 15. 16. 16. 16.]
+        #    [13. 14. 14. 15. 15. 16. 16. 16.]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: nearest_coeffs(x, mode="round_prefer_ceil"),
+            output_size=sizes,
+            coordinate_transformation_mode="asymmetric",
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, sizes],
+            outputs=[output],
+            name="test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric",
+        )
+
+    @staticmethod
+    def export_resize_upsample_sizes_nearest_ceil_half_pixel() -> None:
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "", "sizes"],
+            outputs=["Y"],
+            mode="nearest",
+            coordinate_transformation_mode="half_pixel",
+            nearest_mode="ceil",
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        sizes = np.array([1, 1, 8, 8], dtype=np.int64)
+
+        # [[[[ 1.  2.  2.  3.  3.  4.  4.  4.]
+        #    [ 5.  6.  6.  7.  7.  8.  8.  8.]
+        #    [ 5.  6.  6.  7.  7.  8.  8.  8.]
+        #    [ 9. 10. 10. 11. 11. 12. 12. 12.]
+        #    [ 9. 10. 10. 11. 11. 12. 12. 12.]
+        #    [13. 14. 14. 15. 15. 16. 16. 16.]
+        #    [13. 14. 14. 15. 15. 16. 16. 16.]
+        #    [13. 14. 14. 15. 15. 16. 16. 16.]]]]
+        output = interpolate_nd(
+            data, lambda x, _: nearest_coeffs(x, mode="ceil"), output_size=sizes
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, sizes],
+            outputs=[output],
+            name="test_resize_upsample_sizes_nearest_ceil_half_pixel",
+        )
+
+    @staticmethod
+    def export_resize_downsample_scales_linear_antialias() -> None:
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="linear",
+            antialias=1,
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
+
+        # [[[[ 2.875  4.5  ]
+        #    [ 9.375 11.   ]]]]
+        output = interpolate_nd(
+            data, linear_coeffs_antialias, scale_factors=scales
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_downsample_scales_linear_antialias",
+        )
+
+    @staticmethod
+    def export_resize_downsample_sizes_linear_antialias() -> None:
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "", "sizes"],
+            outputs=["Y"],
+            mode="linear",
+            antialias=1,
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        sizes = np.array([1, 1, 3, 3], dtype=np.int64)
+
+        # [[[[ 2.3636363  3.590909   4.818182 ]
+        #    [ 7.2727275  8.5        9.727273 ]
+        #    [12.181818  13.409091  14.636364 ]]]]
+        output = interpolate_nd(
+            data, linear_coeffs_antialias, output_size=sizes
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, sizes],
+            outputs=[output],
+            name="test_resize_downsample_sizes_linear_antialias",
+        )
+
+    @staticmethod
+    def export_resize_downsample_scales_cubic_antialias() -> None:
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="cubic",
+            antialias=1,
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
+
+        # [[[[ 2.5180721  4.2858863]
+        #    [ 9.589329  11.357142 ]]]]
+        output = interpolate_nd(
+            data, cubic_coeffs_antialias, scale_factors=scales
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_downsample_scales_cubic_antialias",
+        )
+
+    @staticmethod
+    def export_resize_downsample_sizes_cubic_antialias() -> None:
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "", "sizes"],
+            outputs=["Y"],
+            mode="cubic",
+            antialias=1,
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        sizes = np.array([1, 1, 3, 3], dtype=np.int64)
+
+        # [[[[ 1.7750092  3.1200073  4.4650054]
+        #    [ 7.1550016  8.5        9.844998 ]
+        #    [12.534994  13.8799925 15.224991 ]]]]
+        output = interpolate_nd(data, cubic_coeffs_antialias, output_size=sizes).astype(
+            np.float32
+        )
+
+        expect(
+            node,
+            inputs=[data, sizes],
+            outputs=[output],
+            name="test_resize_downsample_sizes_cubic_antialias",
+        )
+
+    @staticmethod
+    def export_resize_upsample_scales_nearest_axes_2_3() -> None:
+        axes = [2, 3]
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="nearest",
+            axes=axes,
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2],
+                        [3, 4],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        scales = np.array([2.0, 3.0], dtype=np.float32)
+
+        # [[[[1. 1. 1. 2. 2. 2.]
+        #    [1. 1. 1. 2. 2. 2.]
+        #    [3. 3. 3. 4. 4. 4.]
+        #    [3. 3. 3. 4. 4. 4.]]]]
+        output = interpolate_nd(
+            data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_upsample_scales_nearest_axes_2_3",
+        )
+
+    @staticmethod
+    def export_resize_upsample_scales_nearest_axes_3_2() -> None:
+        axes = [3, 2]
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="nearest",
+            axes=axes,
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2],
+                        [3, 4],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        scales = np.array([3.0, 2.0], dtype=np.float32)
+
+        # [[[[1. 1. 1. 2. 2. 2.]
+        #    [1. 1. 1. 2. 2. 2.]
+        #    [3. 3. 3. 4. 4. 4.]
+        #    [3. 3. 3. 4. 4. 4.]]]]
+        output = interpolate_nd(
+            data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_upsample_scales_nearest_axes_3_2",
+        )
+
+    @staticmethod
+    def export_resize_upsample_sizes_nearest_axes_2_3() -> None:
+        axes = [2, 3]
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "", "sizes"],
+            outputs=["Y"],
+            mode="nearest",
+            axes=axes,
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2],
+                        [3, 4],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        sizes = np.array([7, 8], dtype=np.int64)
+
+        # [[[[1. 1. 1. 1. 2. 2. 2. 2.]
+        #    [1. 1. 1. 1. 2. 2. 2. 2.]
+        #    [1. 1. 1. 1. 2. 2. 2. 2.]
+        #    [1. 1. 1. 1. 2. 2. 2. 2.]
+        #    [3. 3. 3. 3. 4. 4. 4. 4.]
+        #    [3. 3. 3. 3. 4. 4. 4. 4.]
+        #    [3. 3. 3. 3. 4. 4. 4. 4.]]]]
+        output = interpolate_nd(
+            data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, sizes],
+            outputs=[output],
+            name="test_resize_upsample_sizes_nearest_axes_2_3",
+        )
+
+    @staticmethod
+    def export_resize_upsample_sizes_nearest_axes_3_2() -> None:
+        axes = [3, 2]
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "", "sizes"],
+            outputs=["Y"],
+            mode="nearest",
+            axes=axes,
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2],
+                        [3, 4],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        sizes = np.array([8, 7], dtype=np.int64)
+
+        # [[[[1. 1. 1. 1. 2. 2. 2. 2.]
+        #    [1. 1. 1. 1. 2. 2. 2. 2.]
+        #    [1. 1. 1. 1. 2. 2. 2. 2.]
+        #    [1. 1. 1. 1. 2. 2. 2. 2.]
+        #    [3. 3. 3. 3. 4. 4. 4. 4.]
+        #    [3. 3. 3. 3. 4. 4. 4. 4.]
+        #    [3. 3. 3. 3. 4. 4. 4. 4.]]]]
+        output = interpolate_nd(
+            data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, sizes],
+            outputs=[output],
+            name="test_resize_upsample_sizes_nearest_axes_3_2",
+        )
+
+    @staticmethod
+    def export_resize_tf_crop_and_resize_axes_2_3() -> None:
+        axes = [2, 3]
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "roi", "", "sizes"],
+            outputs=["Y"],
+            mode="linear",
+            coordinate_transformation_mode="tf_crop_and_resize",
+            axes=axes,
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        # Note: for some rois, the result may be different with that of TF for inaccurate floating point
+        roi = np.array([0.4, 0.6, 0.6, 0.8], dtype=np.float32)
+        sizes = np.array([3, 3], dtype=np.int64)
+
+        # [[[[ 7.6000004  7.9        8.2      ]
+        #    [ 8.8        9.1        9.400001 ]
+        #    [10.        10.3       10.6      ]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: linear_coeffs(x),
+            output_size=sizes,
+            roi=roi,
+            axes=axes,
+            coordinate_transformation_mode="tf_crop_and_resize",
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, roi, sizes],
+            outputs=[output],
+            name="test_resize_tf_crop_and_resize_axes_2_3",
+        )
+
+    @staticmethod
+    def export_resize_tf_crop_and_resize_axes_3_2() -> None:
+        axes = [3, 2]
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "roi", "", "sizes"],
+            outputs=["Y"],
+            mode="linear",
+            coordinate_transformation_mode="tf_crop_and_resize",
+            axes=axes,
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12],
+                        [13, 14, 15, 16],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        # Note: for some rois, the result may be different with that of TF for inaccurate floating point
+        roi = np.array([0.6, 0.4, 0.8, 0.6], dtype=np.float32)
+        sizes = np.array([3, 3], dtype=np.int64)
+
+        # [[[[ 7.6000004  7.9        8.2      ]
+        #    [ 8.8        9.1        9.400001 ]
+        #    [10.        10.3       10.6      ]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: linear_coeffs(x),
+            output_size=sizes,
+            roi=roi,
+            axes=axes,
+            coordinate_transformation_mode="tf_crop_and_resize",
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, roi, sizes],
+            outputs=[output],
+            name="test_resize_tf_crop_and_resize_axes_3_2",
+        )
+
+    @staticmethod
+    def export_resize_upsample_sizes_nearest_not_larger() -> None:
+        keep_aspect_ratio_policy = "not_larger"
+        axes = [2, 3]
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "", "sizes"],
+            outputs=["Y"],
+            mode="nearest",
+            axes=axes,
+            keep_aspect_ratio_policy=keep_aspect_ratio_policy,
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2],
+                        [3, 4],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        sizes = np.array([7, 8], dtype=np.int64)  # Results in 7x7
+
+        # [[[[1. 1. 1. 1. 2. 2. 2.]
+        #    [1. 1. 1. 1. 2. 2. 2.]
+        #    [1. 1. 1. 1. 2. 2. 2.]
+        #    [1. 1. 1. 1. 2. 2. 2.]
+        #    [3. 3. 3. 3. 4. 4. 4.]
+        #    [3. 3. 3. 3. 4. 4. 4.]
+        #    [3. 3. 3. 3. 4. 4. 4.]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: nearest_coeffs(x),
+            output_size=sizes,
+            axes=axes,
+            keep_aspect_ratio_policy=keep_aspect_ratio_policy,
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, sizes],
+            outputs=[output],
+            name="test_resize_upsample_sizes_nearest_not_larger",
+        )
+
+    @staticmethod
+    def export_resize_upsample_sizes_nearest_not_smaller() -> None:
+        keep_aspect_ratio_policy = "not_smaller"
+        axes = [2, 3]
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "", "sizes"],
+            outputs=["Y"],
+            mode="nearest",
+            axes=axes,
+            keep_aspect_ratio_policy=keep_aspect_ratio_policy,
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2],
+                        [3, 4],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        sizes = np.array([7, 8], dtype=np.int64)  # Results in 8x8
+
+        # [[[[1. 1. 1. 1. 2. 2. 2. 2.]
+        #    [1. 1. 1. 1. 2. 2. 2. 2.]
+        #    [1. 1. 1. 1. 2. 2. 2. 2.]
+        #    [1. 1. 1. 1. 2. 2. 2. 2.]
+        #    [3. 3. 3. 3. 4. 4. 4. 4.]
+        #    [3. 3. 3. 3. 4. 4. 4. 4.]
+        #    [3. 3. 3. 3. 4. 4. 4. 4.]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: nearest_coeffs(x),
+            output_size=sizes,
+            axes=axes,
+            keep_aspect_ratio_policy=keep_aspect_ratio_policy,
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, sizes],
+            outputs=[output],
+            name="test_resize_upsample_sizes_nearest_not_smaller",
+        )
+
+    @staticmethod
+    def export_resize_downsample_sizes_nearest_not_larger() -> None:
+        keep_aspect_ratio_policy = "not_larger"
+        axes = [2, 3]
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "", "sizes"],
+            outputs=["Y"],
+            mode="nearest",
+            axes=axes,
+            keep_aspect_ratio_policy=keep_aspect_ratio_policy,
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        sizes = np.array([1, 3], dtype=np.int64)  # Results in 1x2
+
+        # [[[[1. 3.]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: nearest_coeffs(x),
+            output_size=sizes,
+            axes=axes,
+            keep_aspect_ratio_policy=keep_aspect_ratio_policy,
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, sizes],
+            outputs=[output],
+            name="test_resize_downsample_sizes_nearest_not_larger",
+        )
+
+    @staticmethod
+    def export_resize_downsample_sizes_nearest_not_smaller() -> None:
+        keep_aspect_ratio_policy = "not_smaller"
+        axes = [2, 3]
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "", "sizes"],
+            outputs=["Y"],
+            mode="nearest",
+            axes=axes,
+            keep_aspect_ratio_policy=keep_aspect_ratio_policy,
+        )
+
+        data = np.array(
+            [
+                [
+                    [
+                        [1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                    ]
+                ]
+            ],
+            dtype=np.float32,
+        )
+
+        sizes = np.array([1, 3], dtype=np.int64)  # Results in 2x3
+
+        # [[[[1. 2. 4.]
+        #    [5. 6. 8.]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: nearest_coeffs(x),
+            output_size=sizes,
+            axes=axes,
+            keep_aspect_ratio_policy=keep_aspect_ratio_policy,
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, sizes],
+            outputs=[output],
+            name="test_resize_downsample_sizes_nearest_not_smaller",
+        )
+
+    @staticmethod
+    def export_resize_downsample_scales_linear_half_pixel_symmetric() -> None:
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="linear",
+            coordinate_transformation_mode="half_pixel_symmetric",
+        )
+
+        data = np.array([[[[1, 2, 3, 4]]]], dtype=np.float32)
+        scales = np.array([1.0, 1.0, 1.0, 0.6], dtype=np.float32)
+
+        # [[[[1.6666667, 3.3333333]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: linear_coeffs(x),
+            scale_factors=scales,
+            coordinate_transformation_mode="half_pixel_symmetric",
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_downsample_scales_linear_half_pixel_symmetric",
+        )
+
+    @staticmethod
+    def export_resize_upsample_scales_linear_half_pixel_symmetric() -> None:
+        node = onnx.helper.make_node(
+            "Resize",
+            inputs=["X", "", "scales"],
+            outputs=["Y"],
+            mode="linear",
+            coordinate_transformation_mode="half_pixel_symmetric",
+        )
+
+        data = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)
+        scales = np.array([1.0, 1.0, 2.3, 2.94], dtype=np.float32)
+
+        # [[[[1.        , 1.15986395, 1.5       , 1.84013605, 2.        ],
+        #    [1.56521738, 1.72508133, 2.06521738, 2.40535343, 2.56521738],
+        #    [2.43478262, 2.59464657, 2.93478262, 3.27491867, 3.43478262],
+        #    [3.        , 3.15986395, 3.5       , 3.84013605, 4.        ]]]]
+        output = interpolate_nd(
+            data,
+            lambda x, _: linear_coeffs(x),
+            scale_factors=scales,
+            coordinate_transformation_mode="half_pixel_symmetric",
+        ).astype(np.float32)
+
+        expect(
+            node,
+            inputs=[data, scales],
+            outputs=[output],
+            name="test_resize_upsample_scales_linear_half_pixel_symmetric",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reversesequence.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reversesequence.py
new file mode 100644
index 0000000000000000000000000000000000000000..fba86660d48973782bcdace71a859dbf294553d0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/reversesequence.py
@@ -0,0 +1,86 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class ReverseSequence(Base):
    """Test cases for the ONNX ``ReverseSequence`` operator."""

    @staticmethod
    def export_reversesequence_time() -> None:
        """Reverse along the time axis (0); batch entries run along axis 1."""
        node = onnx.helper.make_node(
            "ReverseSequence",
            inputs=["x", "sequence_lens"],
            outputs=["y"],
            time_axis=0,
            batch_axis=1,
        )
        # Column b holds the sequence 4*b .. 4*b+3, time running down the rows.
        x = np.arange(16, dtype=np.float32).reshape(4, 4).T
        # Batch b has its first sequence_lens[b] time steps reversed.
        sequence_lens = np.array([4, 3, 2, 1], dtype=np.int64)

        y = np.array(
            [
                [3.0, 6.0, 9.0, 12.0],
                [2.0, 5.0, 8.0, 13.0],
                [1.0, 4.0, 10.0, 14.0],
                [0.0, 7.0, 11.0, 15.0],
            ],
            dtype=np.float32,
        )

        expect(
            node,
            inputs=[x, sequence_lens],
            outputs=[y],
            name="test_reversesequence_time",
        )

    @staticmethod
    def export_reversesequence_batch() -> None:
        """Reverse along the time axis (1); batch entries run along axis 0."""
        node = onnx.helper.make_node(
            "ReverseSequence",
            inputs=["x", "sequence_lens"],
            outputs=["y"],
            time_axis=1,
            batch_axis=0,
        )
        # Row b holds the sequence 4*b .. 4*b+3.
        x = np.arange(16, dtype=np.float32).reshape(4, 4)
        sequence_lens = np.array([1, 2, 3, 4], dtype=np.int64)

        y = np.array(
            [
                [0.0, 1.0, 2.0, 3.0],
                [5.0, 4.0, 6.0, 7.0],
                [10.0, 9.0, 8.0, 11.0],
                [15.0, 14.0, 13.0, 12.0],
            ],
            dtype=np.float32,
        )

        expect(
            node,
            inputs=[x, sequence_lens],
            outputs=[y],
            name="test_reversesequence_batch",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/rmsnormalization.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/rmsnormalization.py
new file mode 100644
index 0000000000000000000000000000000000000000..5415158e278691bff2b7d36e8e52e2cdb4785bfa
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/rmsnormalization.py
@@ -0,0 +1,126 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.reference.ops.op_rms_normalization import _rms_normalization
+
+
def calculate_normalized_shape(x_shape, axis):
    """Return the trailing dimensions of ``x_shape`` from ``axis`` onward.

    A negative ``axis`` is wrapped Python-style (``axis + rank``). The result is
    the shape of the scale tensor ``W`` for ``RMSNormalization``.
    """
    normalized_axis = axis + len(x_shape) if axis < 0 else axis
    return x_shape[normalized_axis:]
+
+
class RMSNormalization(Base):
    """Test cases for the ONNX ``RMSNormalization`` operator."""

    @staticmethod
    def export() -> None:
        """4-D input, exercising every positive and negative axis value."""
        X = np.random.randn(2, 3, 4, 5).astype(np.float32)
        rank = len(X.shape)

        def case(axis: int) -> None:
            # W scales the trailing (normalized) dimensions starting at `axis`.
            W = np.random.randn(*calculate_normalized_shape(X.shape, axis)).astype(
                np.float32
            )
            Y = _rms_normalization(X, W, axis=axis)

            node = onnx.helper.make_node(
                "RMSNormalization",
                inputs=["X", "W"],
                outputs=["Y"],
                axis=axis,
            )

            suffix = f"_negative_{-axis}" if axis < 0 else str(axis)
            expect(
                node,
                inputs=[X, W],
                outputs=[Y],
                name=f"test_rms_normalization_4d_axis{suffix}",
            )

        for i in range(rank):
            case(i)
            case(i - rank)

    @staticmethod
    def export_default_axis() -> None:
        """Omitting the axis attribute means axis=-1 (normalize the last dim)."""
        X = np.random.randn(2, 3, 4, 5).astype(np.float32)
        W = np.random.randn(*calculate_normalized_shape(X.shape, -1)).astype(
            np.float32
        )
        # The reference implementation also defaults to axis=-1.
        Y = _rms_normalization(X, W)

        node = onnx.helper.make_node(
            "RMSNormalization",
            inputs=["X", "W"],
            outputs=["Y"],
        )

        expect(
            node,
            inputs=[X, W],
            outputs=[Y],
            name="test_rms_normalization_default_axis",
        )

    @staticmethod
    def export2d() -> None:
        """2-D input, every positive and negative axis value."""
        X = np.random.randn(3, 4).astype(np.float32)
        rank = len(X.shape)

        def case(axis: int) -> None:
            W = np.random.randn(*calculate_normalized_shape(X.shape, axis)).astype(
                np.float32
            )
            Y = _rms_normalization(X, W, axis=axis)

            node = onnx.helper.make_node(
                "RMSNormalization",
                inputs=["X", "W"],
                outputs=["Y"],
                axis=axis,
            )

            suffix = f"_negative_{-axis}" if axis < 0 else str(axis)
            expect(
                node,
                inputs=[X, W],
                outputs=[Y],
                name=f"test_rms_normalization_2d_axis{suffix}",
            )

        for i in range(rank):
            case(i)
            case(i - rank)

    @staticmethod
    def export3d_epsilon() -> None:
        """3-D input with a non-default epsilon, every axis value."""
        epsilon = 1e-1
        X = np.random.randn(2, 3, 5).astype(np.float32)
        rank = len(X.shape)

        def case(axis: int) -> None:
            W = np.random.randn(*calculate_normalized_shape(X.shape, axis)).astype(
                np.float32
            )
            Y = _rms_normalization(X, W, axis=axis, epsilon=epsilon)

            node = onnx.helper.make_node(
                "RMSNormalization",
                inputs=["X", "W"],
                outputs=["Y"],
                axis=axis,
                epsilon=epsilon,
            )

            suffix = f"_negative_{-axis}" if axis < 0 else str(axis)
            expect(
                node,
                inputs=[X, W],
                outputs=[Y],
                name=f"test_rms_normalization_3d_axis{suffix}_epsilon",
            )

        for i in range(rank):
            case(i)
            case(i - rank)
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/rnn.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/rnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..983f37dc6eb5f56aad41a77b5dc8f5fc649f56f0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/rnn.py
@@ -0,0 +1,220 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from typing import Any
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class RNNHelper:
    """NumPy reference for a single-direction vanilla RNN with tanh activation.

    Keyword parameters mirror the ONNX ``RNN`` operator inputs: ``X`` (required),
    ``W`` and ``R`` (required, with a leading num_directions axis), plus optional
    ``B``, ``initial_h`` and ``layout``. Only ``num_directions == 1`` is
    supported; anything else raises ``NotImplementedError``.
    """

    def __init__(self, **params: Any) -> None:
        x_key, w_key, r_key = "X", "W", "R"
        b_key, h0_key, layout_key = "B", "initial_h", "layout"

        for required in (x_key, w_key, r_key):
            assert required in params, f"Missing Required Input: {required}"

        self.num_directions = params[w_key].shape[0]
        if self.num_directions != 1:
            raise NotImplementedError()

        # Drop the leading num_directions axis from everything except X.
        # NOTE(review): this squeezes every non-X parameter, including a passed
        # `layout` value — confirm callers never pass `layout` as a plain int.
        for key in params:
            if key != x_key:
                params[key] = np.squeeze(params[key], axis=0)

        hidden_size = params[r_key].shape[-1]
        # Batch size is read from axis 1 of X *before* any layout transpose,
        # matching the original implementation's statement order.
        batch_size = params[x_key].shape[1]

        layout = params.get(layout_key, 0)
        x = params[x_key]
        if layout != 0:
            # layout=1 stores X as (batch, seq, input); bring time to axis 0.
            x = np.swapaxes(x, 0, 1)

        self.X = x
        self.W = params[w_key]
        self.R = params[r_key]
        # Bias defaults to zeros for both the Wb and Rb halves.
        self.B = params.get(b_key, np.zeros(2 * hidden_size, dtype=np.float32))
        # Initial hidden state defaults to zeros.
        self.H_0 = params.get(
            h0_key, np.zeros((batch_size, hidden_size), dtype=np.float32)
        )
        self.LAYOUT = layout

    def f(self, x: np.ndarray) -> np.ndarray:
        """Activation function: elementwise tanh."""
        return np.tanh(x)

    def step(self) -> tuple[np.ndarray, np.ndarray]:
        """Run the recurrence over all time steps.

        Returns ``(Y, Y_h)``: ``Y`` stacks the hidden state of every step and
        ``Y_h`` is the final hidden state, both arranged per ``self.LAYOUT``.
        """
        seq_length, batch_size = self.X.shape[0], self.X.shape[1]
        hidden_size = self.H_0.shape[-1]

        Y = np.empty([seq_length, self.num_directions, batch_size, hidden_size])

        # Combined bias Wb + Rb is loop-invariant; compute it once.
        bias = np.add(*np.split(self.B, 2))
        h_t = self.H_0
        states = []
        for x_t in np.split(self.X, seq_length, axis=0):
            h_t = self.f(np.dot(x_t, self.W.T) + np.dot(h_t, self.R.T) + bias)
            states.append(h_t)

        if self.num_directions == 1:
            Y[:, 0, :, :] = np.concatenate(states)

        if self.LAYOUT == 0:
            Y_h = Y[-1]
        else:
            Y = np.transpose(Y, [2, 0, 1, 3])
            Y_h = Y[:, :, -1, :]

        return Y, Y_h
+
+
class RNN(Base):
    """Test-case generators for the ONNX ``RNN`` operator."""

    @staticmethod
    def export_defaults() -> None:
        """Single-step sequence with constant weights and default attributes."""
        x = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]], dtype=np.float32)

        input_size = 2
        hidden_size = 4
        weight_scale = 0.1

        node = onnx.helper.make_node(
            "RNN", inputs=["X", "W", "R"], outputs=["", "Y_h"], hidden_size=hidden_size
        )

        w = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32)
        r = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32)

        _, y_h = RNNHelper(X=x, W=w, R=r).step()
        expect(
            node,
            inputs=[x, w, r],
            outputs=[y_h.astype(np.float32)],
            name="test_simple_rnn_defaults",
        )

    @staticmethod
    def export_initial_bias() -> None:
        """Constant weights plus a non-zero input bias; recurrence bias is zero."""
        x = np.array(
            [[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]], dtype=np.float32
        )

        input_size = 3
        hidden_size = 5
        custom_bias = 0.1
        weight_scale = 0.1

        node = onnx.helper.make_node(
            "RNN",
            inputs=["X", "W", "R", "B"],
            outputs=["", "Y_h"],
            hidden_size=hidden_size,
        )

        w = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32)
        r = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32)

        # B is the two bias halves side by side; only the first half is non-zero.
        w_bias = custom_bias * np.ones((1, hidden_size)).astype(np.float32)
        r_bias = np.zeros((1, hidden_size)).astype(np.float32)
        b = np.hstack((w_bias, r_bias))

        _, y_h = RNNHelper(X=x, W=w, R=r, B=b).step()
        expect(
            node,
            inputs=[x, w, r, b],
            outputs=[y_h.astype(np.float32)],
            name="test_simple_rnn_with_initial_bias",
        )

    @staticmethod
    def export_seq_length() -> None:
        """Two-step sequence with random weights and a fully random bias."""
        x = np.array(
            [
                [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
                [[10.0, 11.0, 12.0], [13.0, 14.0, 15.0], [16.0, 17.0, 18.0]],
            ],
            dtype=np.float32,
        )

        input_size = 3
        hidden_size = 5

        node = onnx.helper.make_node(
            "RNN",
            inputs=["X", "W", "R", "B"],
            outputs=["", "Y_h"],
            hidden_size=hidden_size,
        )

        # Draw order (W, R, W bias, R bias) is kept stable for reproducibility.
        w = np.random.randn(1, hidden_size, input_size).astype(np.float32)
        r = np.random.randn(1, hidden_size, hidden_size).astype(np.float32)
        w_bias = np.random.randn(1, hidden_size).astype(np.float32)
        r_bias = np.random.randn(1, hidden_size).astype(np.float32)
        b = np.hstack((w_bias, r_bias))

        _, y_h = RNNHelper(X=x, W=w, R=r, B=b).step()
        expect(
            node,
            inputs=[x, w, r, b],
            outputs=[y_h.astype(np.float32)],
            name="test_rnn_seq_length",
        )

    @staticmethod
    def export_batchwise() -> None:
        """Batch-major (layout=1) variant that also checks the full Y output."""
        x = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]], dtype=np.float32)

        input_size = 2
        hidden_size = 4
        weight_scale = 0.5
        layout = 1

        node = onnx.helper.make_node(
            "RNN",
            inputs=["X", "W", "R"],
            outputs=["Y", "Y_h"],
            hidden_size=hidden_size,
            layout=layout,
        )

        w = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32)
        r = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32)

        y, y_h = RNNHelper(X=x, W=w, R=r, layout=layout).step()
        expect(
            node,
            inputs=[x, w, r],
            outputs=[y.astype(np.float32), y_h.astype(np.float32)],
            name="test_simple_rnn_batchwise",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/roialign.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/roialign.py
new file mode 100644
index 0000000000000000000000000000000000000000..b59ef06996c63a8cb46e3ce638fa75ac17c4fdff
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/roialign.py
@@ -0,0 +1,446 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
def get_roi_align_input_values():
    """Shared inputs for the RoiAlign test cases.

    Returns:
        X: float32 feature map of shape (1, 1, 10, 10).
        batch_indices: int64 vector mapping every ROI to batch element 0.
        rois: float32 array of shape (3, 4), one box per row.
    """
    feature_rows = [
        [0.2764, 0.7150, 0.1958, 0.3416, 0.4638, 0.0259, 0.2963, 0.6518, 0.4856, 0.7250],
        [0.9637, 0.0895, 0.2919, 0.6753, 0.0234, 0.6132, 0.8085, 0.5324, 0.8992, 0.4467],
        [0.3265, 0.8479, 0.9698, 0.2471, 0.9336, 0.1878, 0.4766, 0.4308, 0.3400, 0.2162],
        [0.0206, 0.1720, 0.2155, 0.4394, 0.0653, 0.3406, 0.7724, 0.3921, 0.2541, 0.5799],
        [0.4062, 0.2194, 0.4473, 0.4687, 0.7109, 0.9327, 0.9815, 0.6320, 0.1728, 0.6119],
        [0.3097, 0.1283, 0.4984, 0.5068, 0.4279, 0.0173, 0.4388, 0.0430, 0.4671, 0.7119],
        [0.1011, 0.8477, 0.4726, 0.1777, 0.9923, 0.4042, 0.1869, 0.7795, 0.9946, 0.9689],
        [0.1366, 0.3671, 0.7011, 0.6234, 0.9867, 0.5585, 0.6985, 0.5609, 0.8788, 0.9928],
        [0.5697, 0.8511, 0.6711, 0.9406, 0.8751, 0.7496, 0.1650, 0.1049, 0.1559, 0.2514],
        [0.7012, 0.4056, 0.7879, 0.3461, 0.0415, 0.2998, 0.5094, 0.3727, 0.5482, 0.0502],
    ]
    # Wrap the 10x10 grid in batch and channel axes -> (1, 1, 10, 10).
    X = np.array([[feature_rows]], dtype=np.float32)
    batch_indices = np.array([0, 0, 0], dtype=np.int64)
    rois = np.array([[0, 0, 9, 9], [0, 5, 4, 9], [5, 5, 9, 9]], dtype=np.float32)
    return X, batch_indices, rois
+
+
class RoiAlign(Base):
    """Test-case generators for the ONNX ``RoiAlign`` operator.

    Each exporter builds a RoiAlign node over a 1x1x10x10 feature map with
    three ROIs and checks it against a precomputed 3x1x5x5 expected output.
    """

    @staticmethod
    def export_roialign_aligned_false() -> None:
        """RoiAlign with ``output_half_pixel`` coordinates (mode left at its default)."""
        node = onnx.helper.make_node(
            "RoiAlign",
            inputs=["X", "rois", "batch_indices"],
            outputs=["Y"],
            spatial_scale=1.0,
            output_height=5,
            output_width=5,
            sampling_ratio=2,
            coordinate_transformation_mode="output_half_pixel",
        )

        X, batch_indices, rois = get_roi_align_input_values()
        # (num_rois, C, output_height, output_width)
        # Expected output, precomputed for the shared inputs above.
        Y = np.array(
            [
                [
                    [
                        [0.4664, 0.4466, 0.3405, 0.5688, 0.6068],
                        [0.3714, 0.4296, 0.3835, 0.5562, 0.3510],
                        [0.2768, 0.4883, 0.5222, 0.5528, 0.4171],
                        [0.4713, 0.4844, 0.6904, 0.4920, 0.8774],
                        [0.6239, 0.7125, 0.6289, 0.3355, 0.3495],
                    ]
                ],
                [
                    [
                        [0.3022, 0.4305, 0.4696, 0.3978, 0.5423],
                        [0.3656, 0.7050, 0.5165, 0.3172, 0.7015],
                        [0.2912, 0.5059, 0.6476, 0.6235, 0.8299],
                        [0.5916, 0.7389, 0.7048, 0.8372, 0.8893],
                        [0.6227, 0.6153, 0.7097, 0.6154, 0.4585],
                    ]
                ],
                [
                    [
                        [0.2384, 0.3379, 0.3717, 0.6100, 0.7601],
                        [0.3767, 0.3785, 0.7147, 0.9243, 0.9727],
                        [0.5749, 0.5826, 0.5709, 0.7619, 0.8770],
                        [0.5355, 0.2566, 0.2141, 0.2796, 0.3600],
                        [0.4365, 0.3504, 0.2887, 0.3661, 0.2349],
                    ]
                ],
            ],
            dtype=np.float32,
        )

        expect(
            node,
            inputs=[X, rois, batch_indices],
            outputs=[Y],
            name="test_roialign_aligned_false",
        )

    @staticmethod
    def export_roialign_aligned_true() -> None:
        """Same inputs as above but with ``half_pixel`` coordinate transformation."""
        node = onnx.helper.make_node(
            "RoiAlign",
            inputs=["X", "rois", "batch_indices"],
            outputs=["Y"],
            spatial_scale=1.0,
            output_height=5,
            output_width=5,
            sampling_ratio=2,
            coordinate_transformation_mode="half_pixel",
        )

        X, batch_indices, rois = get_roi_align_input_values()
        # (num_rois, C, output_height, output_width)
        # Expected output, precomputed for the shared inputs above.
        Y = np.array(
            [
                [
                    [
                        [0.5178, 0.3434, 0.3229, 0.4474, 0.6344],
                        [0.4031, 0.5366, 0.4428, 0.4861, 0.4023],
                        [0.2512, 0.4002, 0.5155, 0.6954, 0.3465],
                        [0.3350, 0.4601, 0.5881, 0.3439, 0.6849],
                        [0.4932, 0.7141, 0.8217, 0.4719, 0.4039],
                    ]
                ],
                [
                    [
                        [0.3070, 0.2187, 0.3337, 0.4880, 0.4870],
                        [0.1871, 0.4914, 0.5561, 0.4192, 0.3686],
                        [0.1433, 0.4608, 0.5971, 0.5310, 0.4982],
                        [0.2788, 0.4386, 0.6022, 0.7000, 0.7524],
                        [0.5774, 0.7024, 0.7251, 0.7338, 0.8163],
                    ]
                ],
                [
                    [
                        [0.2393, 0.4075, 0.3379, 0.2525, 0.4743],
                        [0.3671, 0.2702, 0.4105, 0.6419, 0.8308],
                        [0.5556, 0.4543, 0.5564, 0.7502, 0.9300],
                        [0.6626, 0.5617, 0.4813, 0.4954, 0.6663],
                        [0.6636, 0.3721, 0.2056, 0.1928, 0.2478],
                    ]
                ],
            ],
            dtype=np.float32,
        )

        expect(
            node,
            inputs=[X, rois, batch_indices],
            outputs=[Y],
            name="test_roialign_aligned_true",
        )

    @staticmethod
    def export_roialign_mode_max() -> None:
        """RoiAlign with ``mode="max"``; inputs are inlined (same values as the
        shared helper, with trailing zeros trimmed from some literals)."""
        X = np.array(
            [
                [
                    [
                        [
                            0.2764,
                            0.715,
                            0.1958,
                            0.3416,
                            0.4638,
                            0.0259,
                            0.2963,
                            0.6518,
                            0.4856,
                            0.725,
                        ],
                        [
                            0.9637,
                            0.0895,
                            0.2919,
                            0.6753,
                            0.0234,
                            0.6132,
                            0.8085,
                            0.5324,
                            0.8992,
                            0.4467,
                        ],
                        [
                            0.3265,
                            0.8479,
                            0.9698,
                            0.2471,
                            0.9336,
                            0.1878,
                            0.4766,
                            0.4308,
                            0.34,
                            0.2162,
                        ],
                        [
                            0.0206,
                            0.172,
                            0.2155,
                            0.4394,
                            0.0653,
                            0.3406,
                            0.7724,
                            0.3921,
                            0.2541,
                            0.5799,
                        ],
                        [
                            0.4062,
                            0.2194,
                            0.4473,
                            0.4687,
                            0.7109,
                            0.9327,
                            0.9815,
                            0.632,
                            0.1728,
                            0.6119,
                        ],
                        [
                            0.3097,
                            0.1283,
                            0.4984,
                            0.5068,
                            0.4279,
                            0.0173,
                            0.4388,
                            0.043,
                            0.4671,
                            0.7119,
                        ],
                        [
                            0.1011,
                            0.8477,
                            0.4726,
                            0.1777,
                            0.9923,
                            0.4042,
                            0.1869,
                            0.7795,
                            0.9946,
                            0.9689,
                        ],
                        [
                            0.1366,
                            0.3671,
                            0.7011,
                            0.6234,
                            0.9867,
                            0.5585,
                            0.6985,
                            0.5609,
                            0.8788,
                            0.9928,
                        ],
                        [
                            0.5697,
                            0.8511,
                            0.6711,
                            0.9406,
                            0.8751,
                            0.7496,
                            0.165,
                            0.1049,
                            0.1559,
                            0.2514,
                        ],
                        [
                            0.7012,
                            0.4056,
                            0.7879,
                            0.3461,
                            0.0415,
                            0.2998,
                            0.5094,
                            0.3727,
                            0.5482,
                            0.0502,
                        ],
                    ]
                ]
            ],
            dtype=np.float32,
        )
        rois = np.array(
            [[0.0, 0.0, 9.0, 9.0], [0.0, 5.0, 4.0, 9.0], [5.0, 5.0, 9.0, 9.0]],
            dtype=np.float32,
        )
        batch_indices = np.array([0, 0, 0], dtype=np.int64)

        # Expected (num_rois, C, output_height, output_width), precomputed.
        Y = np.array(
            [
                [
                    [
                        [0.3445228, 0.37310338, 0.37865096, 0.446696, 0.37991184],
                        [0.4133513, 0.5455125, 0.6651902, 0.55805874, 0.27110294],
                        [0.21223956, 0.40924096, 0.8417618, 0.792561, 0.37196714],
                        [0.46835402, 0.39741728, 0.8012819, 0.4969306, 0.5495158],
                        [0.3595896, 0.5196813, 0.5403741, 0.23814403, 0.19992709],
                    ]
                ],
                [
                    [
                        [0.30517197, 0.5086199, 0.3189761, 0.4054401, 0.47630402],
                        [0.50862, 0.8477, 0.37808004, 0.24936005, 0.79384017],
                        [0.17620805, 0.29368007, 0.44870415, 0.4987201, 0.63148826],
                        [0.51066005, 0.8511, 0.5368801, 0.9406, 0.70008016],
                        [0.4487681, 0.51066035, 0.5042561, 0.5643603, 0.42004836],
                    ]
                ],
                [
                    [
                        [0.21062402, 0.3510401, 0.37416005, 0.5967599, 0.46507207],
                        [0.32336006, 0.31180006, 0.6236001, 0.9946, 0.7751202],
                        [0.35744014, 0.5588001, 0.35897616, 0.7030401, 0.6353923],
                        [0.5996801, 0.27940005, 0.17948808, 0.35152006, 0.31769615],
                        [0.3598083, 0.40752012, 0.2385281, 0.43856013, 0.26313624],
                    ]
                ],
            ],
            dtype=np.float32,
        )

        node = onnx.helper.make_node(
            "RoiAlign",
            inputs=["X", "rois", "batch_indices"],
            mode="max",
            outputs=["Y"],
            spatial_scale=1.0,
            output_height=5,
            output_width=5,
            sampling_ratio=2,
            coordinate_transformation_mode="output_half_pixel",
        )

        expect(
            node,
            inputs=[X, rois, batch_indices],
            outputs=[Y],
            name="test_roialign_mode_max",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/rotaryembedding.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/rotaryembedding.py
new file mode 100644
index 0000000000000000000000000000000000000000..3852d6303e336fb7f5723ab7d60ce0b7cfb10284
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/rotaryembedding.py
@@ -0,0 +1,231 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+from onnx.reference.ops.op_rotary_embedding import rotary_embedding
+
+
class RotaryEmbedding(Base):
    """Test-case generators for the ONNX ``RotaryEmbedding`` operator.

    Expected outputs come from the reference implementation
    ``onnx.reference.ops.op_rotary_embedding.rotary_embedding``.
    """

    @staticmethod
    def export_rotary_embedding() -> None:
        """4-D input with explicit position ids and default attributes."""
        x = np.random.rand(2, 4, 3, 8).astype(np.float32)
        pos_ids = np.random.uniform(0, 50, (2, 3)).astype(np.int64)
        sin_cache = np.random.rand(50, 4).astype(np.float32)
        cos_cache = np.random.rand(50, 4).astype(np.float32)

        node = onnx.helper.make_node(
            "RotaryEmbedding",
            inputs=["input", "cos_cache", "sin_cache", "position_ids"],
            outputs=["output"],
        )

        y = rotary_embedding(x, cos_cache, sin_cache, position_ids=pos_ids)

        expect(
            node,
            inputs=[x, cos_cache, sin_cache, pos_ids],
            outputs=[y],
            name="test_rotary_embedding",
        )

    @staticmethod
    def export_rotary_embedding_3d_input() -> None:
        """3-D input; ``num_heads`` tells the op how to split the last axis."""
        num_heads = 4
        x = np.random.rand(2, 3, 32).astype(np.float32)
        pos_ids = np.random.uniform(0, 50, (2, 3)).astype(np.int64)
        sin_cache = np.random.rand(50, 4).astype(np.float32)
        cos_cache = np.random.rand(50, 4).astype(np.float32)

        node = onnx.helper.make_node(
            "RotaryEmbedding",
            inputs=["input", "cos_cache", "sin_cache", "position_ids"],
            outputs=["output"],
            num_heads=num_heads,
        )

        y = rotary_embedding(
            x,
            cos_cache,
            sin_cache,
            position_ids=pos_ids,
            num_heads=num_heads,
        )

        expect(
            node,
            inputs=[x, cos_cache, sin_cache, pos_ids],
            outputs=[y],
            name="test_rotary_embedding_3d_input",
        )

    @staticmethod
    def export_rotary_embedding_interleaved() -> None:
        """Interleaved (real/imag pairs adjacent) rotation variant."""
        x = np.random.rand(2, 4, 3, 8).astype(np.float32)
        pos_ids = np.random.uniform(0, 50, (2, 3)).astype(np.int64)
        sin_cache = np.random.rand(50, 4).astype(np.float32)
        cos_cache = np.random.rand(50, 4).astype(np.float32)

        node = onnx.helper.make_node(
            "RotaryEmbedding",
            inputs=["input", "cos_cache", "sin_cache", "position_ids"],
            outputs=["output"],
            interleaved=1,
        )

        y = rotary_embedding(
            x,
            cos_cache,
            sin_cache,
            position_ids=pos_ids,
            interleaved=1,
        )

        expect(
            node,
            inputs=[x, cos_cache, sin_cache, pos_ids],
            outputs=[y],
            name="test_rotary_embedding_interleaved",
        )

    @staticmethod
    def export_rotary_embedding_with_rotary_dim() -> None:
        """Rotate only the first ``rotary_embedding_dim`` channels."""
        x = np.random.rand(2, 4, 3, 8).astype(np.float32)
        pos_ids = np.random.uniform(0, 50, (2, 3)).astype(np.int64)
        sin_cache = np.random.rand(50, 4).astype(np.float32)
        cos_cache = np.random.rand(50, 4).astype(np.float32)

        node = onnx.helper.make_node(
            "RotaryEmbedding",
            inputs=["input", "cos_cache", "sin_cache", "position_ids"],
            outputs=["output"],
            rotary_embedding_dim=4,
        )

        y = rotary_embedding(
            x,
            cos_cache,
            sin_cache,
            position_ids=pos_ids,
            rotary_embedding_dim=4,
        )

        expect(
            node,
            inputs=[x, cos_cache, sin_cache, pos_ids],
            outputs=[y],
            name="test_rotary_embedding_with_rotary_dim",
        )

    @staticmethod
    def export_rotary_embedding_with_interleaved_rotary_dim() -> None:
        """Combined interleaved rotation with a partial rotary dimension."""
        x = np.random.rand(2, 4, 3, 8).astype(np.float32)
        pos_ids = np.random.uniform(0, 50, (2, 3)).astype(np.int64)
        sin_cache = np.random.rand(50, 4).astype(np.float32)
        cos_cache = np.random.rand(50, 4).astype(np.float32)

        node = onnx.helper.make_node(
            "RotaryEmbedding",
            inputs=["input", "cos_cache", "sin_cache", "position_ids"],
            outputs=["output"],
            rotary_embedding_dim=4,
            interleaved=1,
        )

        y = rotary_embedding(
            x,
            cos_cache,
            sin_cache,
            position_ids=pos_ids,
            interleaved=1,
            rotary_embedding_dim=4,
        )

        expect(
            node,
            inputs=[x, cos_cache, sin_cache, pos_ids],
            outputs=[y],
            name="test_rotary_embedding_with_interleaved_rotary_dim",
        )

    @staticmethod
    def export_rotary_embedding_no_position_ids() -> None:
        """No position ids: caches are pre-gathered per (batch, seq) position."""
        x = np.random.rand(2, 4, 3, 8).astype(np.float32)
        sin_cache = np.random.rand(2, 3, 4).astype(np.float32)
        cos_cache = np.random.rand(2, 3, 4).astype(np.float32)

        node = onnx.helper.make_node(
            "RotaryEmbedding",
            inputs=["input", "cos_cache", "sin_cache"],
            outputs=["output"],
        )

        y = rotary_embedding(x, cos_cache, sin_cache)

        expect(
            node,
            inputs=[x, cos_cache, sin_cache],
            outputs=[y],
            name="test_rotary_embedding_no_position_ids",
        )

    @staticmethod
    def export_rotary_embedding_no_position_ids_interleaved() -> None:
        """Pre-gathered caches combined with interleaved rotation."""
        x = np.random.rand(2, 4, 3, 8).astype(np.float32)
        sin_cache = np.random.rand(2, 3, 4).astype(np.float32)
        cos_cache = np.random.rand(2, 3, 4).astype(np.float32)

        node = onnx.helper.make_node(
            "RotaryEmbedding",
            inputs=["input", "cos_cache", "sin_cache"],
            outputs=["output"],
            interleaved=1,
        )

        y = rotary_embedding(
            x,
            cos_cache,
            sin_cache,
            interleaved=1,
        )

        expect(
            node,
            inputs=[x, cos_cache, sin_cache],
            outputs=[y],
            name="test_rotary_embedding_no_position_ids_interleaved",
        )

    @staticmethod
    def export_rotary_embedding_no_position_ids_rotary_dim() -> None:
        """Pre-gathered caches combined with a partial rotary dimension."""
        x = np.random.rand(2, 4, 3, 8).astype(np.float32)
        sin_cache = np.random.rand(2, 3, 4).astype(np.float32)
        cos_cache = np.random.rand(2, 3, 4).astype(np.float32)

        node = onnx.helper.make_node(
            "RotaryEmbedding",
            inputs=["input", "cos_cache", "sin_cache"],
            outputs=["output"],
            rotary_embedding_dim=4,
        )

        y = rotary_embedding(
            x,
            cos_cache,
            sin_cache,
            rotary_embedding_dim=4,
        )

        expect(
            node,
            inputs=[x, cos_cache, sin_cache],
            outputs=[y],
            name="test_rotary_embedding_no_position_ids_rotary_dim",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/round.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/round.py
new file mode 100644
index 0000000000000000000000000000000000000000..6924285eeab71c3cee7bbb4cb4c49d8808df3df9
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/round.py
@@ -0,0 +1,62 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Round(Base):
    """Test-case generator for the ONNX ``Round`` operator."""

    @staticmethod
    def export() -> None:
        """Rounding of positive and negative values, including .5 ties."""
        node = onnx.helper.make_node("Round", inputs=["x"], outputs=["y"])

        x = np.array(
            [0.1, 0.5, 0.9, 1.2, 1.5, 1.8, 2.3, 2.5, 2.7, -1.1, -1.5, -1.9, -2.2, -2.5, -2.8],
            dtype=np.float32,
        )
        # Expected output: halfway cases (0.5, 1.5, 2.5, -1.5, -2.5) go to the
        # nearest even integer.
        y = np.array(
            [0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 3.0, -1.0, -2.0, -2.0, -2.0, -2.0, -3.0],
            dtype=np.float32,
        )
        expect(node, inputs=[x], outputs=[y], name="test_round")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/scan.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/scan.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f251446787f1799bb7dac46c6b9d47f9f44ee7f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/scan.py
@@ -0,0 +1,117 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Scan(Base):  # test-case generators for the ONNX Scan operator (cumulative sum body)
+    @staticmethod
+    def export_scan_8() -> None:  # opset-8 form: leading batch axis; optional sequence_lens passed as ""
+        # Given an input sequence [x1, ..., xN], sum up its elements using a scan
+        # returning the final state (x1+x2+...+xN) as well the scan_output
+        # [x1, x1+x2, ..., x1+x2+...+xN]
+        #
+        # create graph to represent scan body
+        sum_in = onnx.helper.make_tensor_value_info(
+            "sum_in", onnx.TensorProto.FLOAT, [2]
+        )
+        next = onnx.helper.make_tensor_value_info(  # noqa: A001
+            "next", onnx.TensorProto.FLOAT, [2]
+        )
+        sum_out = onnx.helper.make_tensor_value_info(
+            "sum_out", onnx.TensorProto.FLOAT, [2]
+        )
+        scan_out = onnx.helper.make_tensor_value_info(
+            "scan_out", onnx.TensorProto.FLOAT, [2]
+        )
+        add_node = onnx.helper.make_node(
+            "Add", inputs=["sum_in", "next"], outputs=["sum_out"]
+        )
+        id_node = onnx.helper.make_node(
+            "Identity", inputs=["sum_out"], outputs=["scan_out"]
+        )
+        scan_body = onnx.helper.make_graph(
+            [add_node, id_node], "scan_body", [sum_in, next], [sum_out, scan_out]
+        )
+        # create scan op node
+        no_sequence_lens = ""  # optional input, not supplied
+        node = onnx.helper.make_node(
+            "Scan",
+            inputs=[no_sequence_lens, "initial", "x"],
+            outputs=["y", "z"],
+            num_scan_inputs=1,
+            body=scan_body,
+        )
+        # create inputs for batch-size 1, sequence-length 3, inner dimension 2
+        initial = np.array([0, 0]).astype(np.float32).reshape((1, 2))
+        x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((1, 3, 2))
+        # final state computed = [1 + 3 + 5, 2 + 4 + 6]
+        y = np.array([9, 12]).astype(np.float32).reshape((1, 2))
+        # scan-output computed
+        z = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((1, 3, 2))
+
+        expect(
+            node,
+            inputs=[initial, x],
+            outputs=[y, z],
+            name="test_scan_sum",
+            opset_imports=[onnx.helper.make_opsetid("", 8)],
+        )
+
+    @staticmethod
+    def export_scan_9() -> None:  # opset-9 form: no batch axis; omitted optional inputs are left out of the inputs list
+        # Given an input sequence [x1, ..., xN], sum up its elements using a scan
+        # returning the final state (x1+x2+...+xN) as well the scan_output
+        # [x1, x1+x2, ..., x1+x2+...+xN]
+        #
+        # create graph to represent scan body
+        sum_in = onnx.helper.make_tensor_value_info(
+            "sum_in", onnx.TensorProto.FLOAT, [2]
+        )
+        next = onnx.helper.make_tensor_value_info(  # noqa: A001
+            "next", onnx.TensorProto.FLOAT, [2]
+        )
+        sum_out = onnx.helper.make_tensor_value_info(
+            "sum_out", onnx.TensorProto.FLOAT, [2]
+        )
+        scan_out = onnx.helper.make_tensor_value_info(
+            "scan_out", onnx.TensorProto.FLOAT, [2]
+        )
+        add_node = onnx.helper.make_node(
+            "Add", inputs=["sum_in", "next"], outputs=["sum_out"]
+        )
+        id_node = onnx.helper.make_node(
+            "Identity", inputs=["sum_out"], outputs=["scan_out"]
+        )
+        scan_body = onnx.helper.make_graph(
+            [add_node, id_node], "scan_body", [sum_in, next], [sum_out, scan_out]
+        )
+        # create scan op node
+        node = onnx.helper.make_node(
+            "Scan",
+            inputs=["initial", "x"],
+            outputs=["y", "z"],
+            num_scan_inputs=1,
+            body=scan_body,
+        )
+        # create inputs for sequence-length 3, inner dimension 2
+        initial = np.array([0, 0]).astype(np.float32).reshape((2,))
+        x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((3, 2))
+        # final state computed = [1 + 3 + 5, 2 + 4 + 6]
+        y = np.array([9, 12]).astype(np.float32).reshape((2,))
+        # scan-output computed
+        z = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((3, 2))
+
+        expect(
+            node,
+            inputs=[initial, x],
+            outputs=[y, z],
+            name="test_scan9_sum",
+            opset_imports=[onnx.helper.make_opsetid("", 9)],
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/scatter.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/scatter.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4ab3c7a3351b6e93db0d6c8d3084ba1ce4345b1
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/scatter.py
@@ -0,0 +1,105 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx import helper
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+# The below Scatter's numpy implementation is from https://stackoverflow.com/a/46204790/11767360
+def scatter(data, indices, updates, axis=0):  # type: ignore
+    if axis < 0:  # normalize a negative axis to its non-negative equivalent
+        axis = data.ndim + axis
+
+    idx_xsection_shape = indices.shape[:axis] + indices.shape[axis + 1 :]  # indices' shape with the scatter axis removed
+
+    def make_slice(arr, axis, i):  # type: ignore
+        slc = [slice(None)] * arr.ndim  # full slice on every axis except `axis`, which is pinned to i
+        slc[axis] = i
+        return slc
+
+    def unpack(packed):  # type: ignore
+        unpacked = packed[0]  # folds a sequence into left-nested tuples: ((a, b), c)...
+        for i in range(1, len(packed)):
+            unpacked = unpacked, packed[i]
+        return unpacked
+
+    # We use indices and axis parameters to create idx
+    # idx is in a form that can be used as a NumPy advanced indices for scattering of updates param. in data
+    idx = [
+        [
+            unpack(np.indices(idx_xsection_shape).reshape(indices.ndim - 1, -1)),
+            indices[tuple(make_slice(indices, axis, i))].reshape(1, -1)[0],
+        ]
+        for i in range(indices.shape[axis])
+    ]
+    idx = list(np.concatenate(idx, axis=1))
+    idx.insert(axis, idx.pop())  # move the coordinates taken from `indices` values into position `axis`
+
+    # updates_idx is a NumPy advanced indices for indexing of elements in the updates
+    updates_idx = list(idx)
+    updates_idx.pop(axis)  # along `axis`, updates are read in order 0..indices.shape[axis]-1, not at the scattered positions
+    updates_idx.insert(
+        axis, np.repeat(np.arange(indices.shape[axis]), np.prod(idx_xsection_shape))
+    )
+
+    scattered = np.copy(data)  # out-of-place: the input `data` array is never modified
+    scattered[tuple(idx)] = updates[tuple(updates_idx)]
+    return scattered
+
+
+class Scatter(Base):  # test-case generators for the ONNX Scatter operator; both tests pin opset 10 via opset_imports
+    @staticmethod
+    def export_scatter_without_axis() -> None:  # default axis=0
+        node = onnx.helper.make_node(
+            "Scatter",
+            inputs=["data", "indices", "updates"],
+            outputs=["y"],
+        )
+        data = np.zeros((3, 3), dtype=np.float32)
+        indices = np.array([[1, 0, 2], [0, 2, 1]], dtype=np.int64)
+        updates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)
+
+        y = scatter(data, indices, updates)  # expected output computed by the reference impl above
+        # print(y) produces
+        # [[2.0, 1.1, 0.0],
+        #  [1.0, 0.0, 2.2],
+        #  [0.0, 2.1, 1.2]]
+
+        expect(
+            node,
+            inputs=[data, indices, updates],
+            outputs=[y],
+            name="test_scatter_without_axis",
+            opset_imports=[helper.make_opsetid("", 10)],
+        )
+
+    @staticmethod
+    def export_scatter_with_axis() -> None:  # explicit axis=1: scatter along columns
+        axis = 1
+        node = onnx.helper.make_node(
+            "Scatter",
+            inputs=["data", "indices", "updates"],
+            outputs=["y"],
+            axis=axis,
+        )
+        data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)
+        indices = np.array([[1, 3]], dtype=np.int64)
+        updates = np.array([[1.1, 2.1]], dtype=np.float32)
+
+        y = scatter(data, indices, updates, axis=axis)
+        # print(y) produces
+        # [[1.0, 1.1, 3.0, 2.1, 5.0]]
+
+        expect(
+            node,
+            inputs=[data, indices, updates],
+            outputs=[y],
+            name="test_scatter_with_axis",
+            opset_imports=[helper.make_opsetid("", 10)],
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/scatterelements.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/scatterelements.py
new file mode 100644
index 0000000000000000000000000000000000000000..d35eaa97761a99be79cd4a1711de5579741cab8c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/scatterelements.py
@@ -0,0 +1,228 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+# The below ScatterElements' numpy implementation is from https://stackoverflow.com/a/46204790/11767360
+def scatter_elements(data, indices, updates, axis=0, reduction="none"):  # type: ignore
+    if axis < 0:  # normalize a negative axis to its non-negative equivalent
+        axis = data.ndim + axis
+
+    idx_xsection_shape = indices.shape[:axis] + indices.shape[axis + 1 :]  # indices' shape with the scatter axis removed
+
+    def make_slice(arr, axis, i):  # type: ignore
+        slc = [slice(None)] * arr.ndim  # full slice on every axis except `axis`, which is pinned to i
+        slc[axis] = i
+        return slc
+
+    def unpack(packed):  # type: ignore
+        unpacked = packed[0]  # folds a sequence into left-nested tuples: ((a, b), c)...
+        for i in range(1, len(packed)):
+            unpacked = unpacked, packed[i]
+        return unpacked
+
+    def make_indices_for_duplicate(idx):  # type: ignore
+        final_idx = []  # transpose per-axis index arrays into a list of per-element coordinate tuples
+        for i in range(len(idx[0])):
+            final_idx.append(  # noqa: PERF401
+                tuple(idx_element[i] for idx_element in idx)
+            )
+        return list(final_idx)
+
+    # We use indices and axis parameters to create idx
+    # idx is in a form that can be used as a NumPy advanced indices for scattering of updates param. in data
+    idx = [
+        [
+            unpack(np.indices(idx_xsection_shape).reshape(indices.ndim - 1, -1)),
+            indices[tuple(make_slice(indices, axis, i))].reshape(1, -1)[0],
+        ]
+        for i in range(indices.shape[axis])
+    ]
+    idx = list(np.concatenate(idx, axis=1))
+    idx.insert(axis, idx.pop())  # move the coordinates taken from `indices` values into position `axis`
+
+    # updates_idx is a NumPy advanced indices for indexing of elements in the updates
+    updates_idx = list(idx)
+    updates_idx.pop(axis)  # along `axis`, updates are read in order 0..indices.shape[axis]-1, not at the scattered positions
+    updates_idx.insert(
+        axis, np.repeat(np.arange(indices.shape[axis]), np.prod(idx_xsection_shape))
+    )
+
+    scattered = np.copy(data)  # out-of-place: the input `data` array is never modified
+    if reduction == "none":
+        scattered[tuple(idx)] = updates[tuple(updates_idx)]
+    else:
+        idx, updates_idx = (  # element-by-element so duplicate indices accumulate instead of last-write-wins
+            make_indices_for_duplicate(idx),
+            make_indices_for_duplicate(updates_idx),
+        )
+        for iter, idx_set in enumerate(idx):  # noqa: A001
+            if reduction == "add":
+                scattered[idx_set] += updates[updates_idx[iter]]
+            elif reduction == "mul":
+                scattered[idx_set] *= updates[updates_idx[iter]]
+            elif reduction == "max":
+                scattered[idx_set] = np.maximum(
+                    scattered[idx_set], updates[updates_idx[iter]]
+                )
+            elif reduction == "min":
+                scattered[idx_set] = np.minimum(
+                    scattered[idx_set], updates[updates_idx[iter]]
+                )
+    return scattered
+
+
+class ScatterElements(Base):  # test-case generators for the ONNX ScatterElements operator
+    @staticmethod
+    def export_scatter_elements_without_axis() -> None:  # default axis=0
+        node = onnx.helper.make_node(
+            "ScatterElements",
+            inputs=["data", "indices", "updates"],
+            outputs=["y"],
+        )
+        data = np.zeros((3, 3), dtype=np.float32)
+        indices = np.array([[1, 0, 2], [0, 2, 1]], dtype=np.int64)
+        updates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)
+
+        y = scatter_elements(data, indices, updates)  # expected output computed by the reference impl above
+        # print(y) produces
+        # [[2.0, 1.1, 0.0],
+        #  [1.0, 0.0, 2.2],
+        #  [0.0, 2.1, 1.2]]
+
+        expect(
+            node,
+            inputs=[data, indices, updates],
+            outputs=[y],
+            name="test_scatter_elements_without_axis",
+        )
+
+    @staticmethod
+    def export_scatter_elements_with_axis() -> None:  # explicit axis=1: scatter along columns
+        axis = 1
+        node = onnx.helper.make_node(
+            "ScatterElements",
+            inputs=["data", "indices", "updates"],
+            outputs=["y"],
+            axis=axis,
+        )
+        data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)
+        indices = np.array([[1, 3]], dtype=np.int64)
+        updates = np.array([[1.1, 2.1]], dtype=np.float32)
+
+        y = scatter_elements(data, indices, updates, axis)
+        # print(y) produces
+        # [[1.0, 1.1, 3.0, 2.1, 5.0]]
+
+        expect(
+            node,
+            inputs=[data, indices, updates],
+            outputs=[y],
+            name="test_scatter_elements_with_axis",
+        )
+
+    @staticmethod
+    def export_scatter_elements_with_negative_indices() -> None:  # -3 counts from the end of axis 1 (i.e. position 2)
+        axis = 1
+        node = onnx.helper.make_node(
+            "ScatterElements",
+            inputs=["data", "indices", "updates"],
+            outputs=["y"],
+            axis=axis,
+        )
+        data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)
+        indices = np.array([[1, -3]], dtype=np.int64)
+        updates = np.array([[1.1, 2.1]], dtype=np.float32)
+
+        y = scatter_elements(data, indices, updates, axis)
+        # print(y) produces
+        # [[1.0, 1.1, 2.1, 4.0, 5.0]]
+
+        expect(
+            node,
+            inputs=[data, indices, updates],
+            outputs=[y],
+            name="test_scatter_elements_with_negative_indices",
+        )
+
+    @staticmethod
+    def export_scatter_elements_with_duplicate_indices() -> None:  # reduction="add": duplicate index 1 accumulates 2.0+1.1+2.1
+        axis = 1
+        node = onnx.helper.make_node(
+            "ScatterElements",
+            inputs=["data", "indices", "updates"],
+            outputs=["y"],
+            axis=axis,
+            reduction="add",
+        )
+        data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)
+        indices = np.array([[1, 1]], dtype=np.int64)
+        updates = np.array([[1.1, 2.1]], dtype=np.float32)
+
+        y = scatter_elements(data, indices, updates, axis, reduction="add")
+        # print(y) produces
+        # [[1.0, 5.2, 3.0, 4.0, 5.0]]
+
+        expect(
+            node,
+            inputs=[data, indices, updates],
+            outputs=[y],
+            name="test_scatter_elements_with_duplicate_indices",
+        )
+
+    @staticmethod
+    def export_scatter_elements_with_reduction_max() -> None:  # reduction="max": element keeps max(existing, updates)
+        axis = 1
+        node = onnx.helper.make_node(
+            "ScatterElements",
+            inputs=["data", "indices", "updates"],
+            outputs=["y"],
+            axis=axis,
+            reduction="max",
+        )
+        data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)
+        indices = np.array([[1, 1]], dtype=np.int64)
+        updates = np.array([[1.1, 2.1]], dtype=np.float32)
+
+        y = scatter_elements(data, indices, updates, axis, reduction="max")
+        # print(y) produces
+        # [[1.0, 2.1, 3.0, 4.0, 5.0]]
+
+        expect(
+            node,
+            inputs=[data, indices, updates],
+            outputs=[y],
+            name="test_scatter_elements_with_reduction_max",
+        )
+
+    @staticmethod
+    def export_scatter_elements_with_reduction_min() -> None:  # reduction="min": element keeps min(existing, updates)
+        axis = 1
+        node = onnx.helper.make_node(
+            "ScatterElements",
+            inputs=["data", "indices", "updates"],
+            outputs=["y"],
+            axis=axis,
+            reduction="min",
+        )
+        data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)
+        indices = np.array([[1, 1]], dtype=np.int64)
+        updates = np.array([[1.1, 2.1]], dtype=np.float32)
+
+        y = scatter_elements(data, indices, updates, axis, reduction="min")
+        # print(y) produces
+        # [[1.0, 1.1, 3.0, 4.0, 5.0]]
+
+        expect(
+            node,
+            inputs=[data, indices, updates],
+            outputs=[y],
+            name="test_scatter_elements_with_reduction_min",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/scatternd.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/scatternd.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6f7222877fe3bab6dc157786d1d1ba88cc54b2f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/scatternd.py
@@ -0,0 +1,223 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def scatter_nd_impl(data, indices, updates, reduction="none"):  # type: ignore
+    # Check tensor shapes: indices' last axis selects a (partial) coordinate into data
+    assert indices.shape[-1] <= len(data.shape)
+    assert updates.shape == indices.shape[:-1] + data.shape[indices.shape[-1] :]
+
+    # Compute output: copy data, then write/accumulate each update slice (out-of-place)
+    output = np.copy(data)
+    for i in np.ndindex(indices.shape[:-1]):
+        # NOTE: The order of iteration in this loop is not specified.
+        if reduction == "add":
+            output[tuple(indices[i])] += updates[i]
+        elif reduction == "mul":
+            output[tuple(indices[i])] *= updates[i]
+        elif reduction == "max":  # fix: read via tuple(indices[i]) like every other branch; bare indices[i] fancy-indexes rows along axis 0 when indices.shape[-1] > 1
+            output[tuple(indices[i])] = np.maximum(output[tuple(indices[i])], updates[i])
+        elif reduction == "min":  # fix: same tuple(...) indexing as the other branches
+            output[tuple(indices[i])] = np.minimum(output[tuple(indices[i])], updates[i])
+        else:
+            output[tuple(indices[i])] = updates[i]
+    return output
+
+
+class ScatterND(Base):  # test-case generators for the ONNX ScatterND operator
+    @staticmethod
+    def export_scatternd() -> None:  # default reduction: plain overwrite of whole slices
+        node = onnx.helper.make_node(
+            "ScatterND",
+            inputs=["data", "indices", "updates"],
+            outputs=["y"],
+        )
+        data = np.array(
+            [
+                [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+                [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+                [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
+                [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
+            ],
+            dtype=np.float32,
+        )
+        indices = np.array([[0], [2]], dtype=np.int64)  # each index selects a whole (4, 4) slice of data
+        updates = np.array(
+            [
+                [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+                [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
+            ],
+            dtype=np.float32,
+        )
+        # Expecting output as np.array(
+        #    [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+        #     [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+        #     [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
+        #     [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)
+        output = scatter_nd_impl(data, indices, updates)  # expected output computed by the reference impl above
+        expect(
+            node,
+            inputs=[data, indices, updates],
+            outputs=[output],
+            name="test_scatternd",
+        )
+
+    @staticmethod
+    def export_scatternd_add() -> None:  # duplicate index [0] accumulates both update slices into data[0]
+        node = onnx.helper.make_node(
+            "ScatterND",
+            inputs=["data", "indices", "updates"],
+            outputs=["y"],
+            reduction="add",
+        )
+        data = np.array(
+            [
+                [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+                [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+                [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
+                [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
+            ],
+            dtype=np.float32,
+        )
+        indices = np.array([[0], [0]], dtype=np.int64)
+        updates = np.array(
+            [
+                [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+                [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
+            ],
+            dtype=np.float32,
+        )
+        # Expecting output as np.array(
+        #    [[[7, 8, 9, 10], [13, 14, 15, 16], [18, 17, 16, 15], [16, 15, 14, 13]],
+        #     [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+        #     [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
+        #     [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)
+        output = scatter_nd_impl(data, indices, updates, reduction="add")
+        expect(
+            node,
+            inputs=[data, indices, updates],
+            outputs=[output],
+            name="test_scatternd_add",
+        )
+
+    @staticmethod
+    def export_scatternd_multiply() -> None:  # duplicate index [0] multiplies data[0] by both update slices
+        node = onnx.helper.make_node(
+            "ScatterND",
+            inputs=["data", "indices", "updates"],
+            outputs=["y"],
+            reduction="mul",
+        )
+        data = np.array(
+            [
+                [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+                [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+                [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
+                [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
+            ],
+            dtype=np.float32,
+        )
+        indices = np.array([[0], [0]], dtype=np.int64)
+        updates = np.array(
+            [
+                [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+                [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
+            ],
+            dtype=np.float32,
+        )
+        # Expecting output as np.array(
+        #    [[[5, 10, 15, 20], [60, 72, 84, 96], [168, 147, 126, 105], [128, 96, 64, 32]],
+        #     [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+        #     [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
+        #     [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)
+        output = scatter_nd_impl(data, indices, updates, reduction="mul")
+        expect(
+            node,
+            inputs=[data, indices, updates],
+            outputs=[output],
+            name="test_scatternd_multiply",
+        )
+
+    @staticmethod
+    def export_scatternd_max() -> None:  # duplicate index [0] keeps the element-wise max over data[0] and both updates
+        node = onnx.helper.make_node(
+            "ScatterND",
+            inputs=["data", "indices", "updates"],
+            outputs=["y"],
+            reduction="max",
+        )
+        data = np.array(
+            [
+                [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+                [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+                [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
+                [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
+            ],
+            dtype=np.float32,
+        )
+        indices = np.array([[0], [0]], dtype=np.int64)
+        updates = np.array(
+            [
+                [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+                [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
+            ],
+            dtype=np.float32,
+        )
+        # Expecting output as np.array(
+        #    [[[5, 5, 5, 5], [6, 6, 7, 8], [8, 7, 7, 7], [8, 8 ,8, 8]],
+        #     [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+        #     [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
+        #     [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)
+        output = scatter_nd_impl(data, indices, updates, reduction="max")
+        expect(
+            node,
+            inputs=[data, indices, updates],
+            outputs=[output],
+            name="test_scatternd_max",
+        )
+
+    @staticmethod
+    def export_scatternd_min() -> None:  # duplicate index [0] keeps the element-wise min over data[0] and both updates
+        node = onnx.helper.make_node(
+            "ScatterND",
+            inputs=["data", "indices", "updates"],
+            outputs=["y"],
+            reduction="min",
+        )
+        data = np.array(
+            [
+                [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+                [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+                [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
+                [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
+            ],
+            dtype=np.float32,
+        )
+        indices = np.array([[0], [0]], dtype=np.int64)
+        updates = np.array(
+            [
+                [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+                [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
+            ],
+            dtype=np.float32,
+        )
+        # Expecting output as np.array(
+        #    [[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 3, 2, 1]],
+        #     [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
+        #     [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
+        #     [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)
+        output = scatter_nd_impl(data, indices, updates, reduction="min")
+        expect(
+            node,
+            inputs=[data, indices, updates],
+            outputs=[output],
+            name="test_scatternd_min",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/selu.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/selu.py
new file mode 100644
index 0000000000000000000000000000000000000000..2fd523806ec0bb76959391e2210037df0923e247
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/selu.py
@@ -0,0 +1,49 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Selu(Base):  # selu(x) = gamma * (max(0, x) + alpha * (exp(min(0, x)) - 1))
+    @staticmethod
+    def export() -> None:  # non-default attributes: alpha=2.0, gamma=3.0
+        node = onnx.helper.make_node(
+            "Selu", inputs=["x"], outputs=["y"], alpha=2.0, gamma=3.0
+        )
+
+        x = np.array([-1, 0, 1]).astype(np.float32)
+        # expected output [-3.79272318, 0., 3.]
+        y = (
+            np.clip(x, 0, np.inf) * 3.0
+            + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0 * 3.0
+        )
+        expect(node, inputs=[x], outputs=[y], name="test_selu_example")
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = (
+            np.clip(x, 0, np.inf) * 3.0
+            + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0 * 3.0
+        )
+        expect(node, inputs=[x], outputs=[y], name="test_selu")
+
+    @staticmethod
+    def export_selu_default() -> None:  # attributes omitted; reference output uses the spec default constants below
+        default_alpha = 1.67326319217681884765625
+        default_gamma = 1.05070102214813232421875
+        node = onnx.helper.make_node(
+            "Selu",
+            inputs=["x"],
+            outputs=["y"],
+        )
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = (
+            np.clip(x, 0, np.inf) * default_gamma
+            + (np.exp(np.clip(x, -np.inf, 0)) - 1) * default_alpha * default_gamma
+        )
+        expect(node, inputs=[x], outputs=[y], name="test_selu_default")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sequence_map.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sequence_map.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ca9295bba9863237c64af52912a32cab495cf29
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sequence_map.py
@@ -0,0 +1,304 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class SequenceMap(Base):
+    @staticmethod
+    def export_sequence_map_identity_1_sequence():  # type: () -> None
+        body = onnx.helper.make_graph(
+            [onnx.helper.make_node("Identity", ["in0"], ["out0"])],
+            "seq_map_body",
+            [onnx.helper.make_tensor_value_info("in0", onnx.TensorProto.FLOAT, ["N"])],
+            [onnx.helper.make_tensor_value_info("out0", onnx.TensorProto.FLOAT, ["M"])],
+        )
+
+        node = onnx.helper.make_node(
+            "SequenceMap", inputs=["x"], outputs=["y"], body=body
+        )
+
+        x = [np.random.uniform(0.0, 1.0, 10).astype(np.float32) for _ in range(3)]
+        y = x
+        input_type_protos = [
+            onnx.helper.make_sequence_type_proto(
+                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ["N"])
+            ),
+        ]
+        output_type_protos = [
+            onnx.helper.make_sequence_type_proto(
+                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ["N"])
+            ),
+        ]
+        expect(
+            node,
+            inputs=[x],
+            outputs=[y],
+            input_type_protos=input_type_protos,
+            output_type_protos=output_type_protos,
+            name="test_sequence_map_identity_1_sequence",
+        )
+
+    @staticmethod
+    def export_sequence_map_identity_2_sequences():  # type: () -> None
+        body = onnx.helper.make_graph(
+            [
+                onnx.helper.make_node("Identity", ["in0"], ["out0"]),
+                onnx.helper.make_node("Identity", ["in1"], ["out1"]),
+            ],
+            "seq_map_body",
+            [
+                onnx.helper.make_tensor_value_info(
+                    "in0", onnx.TensorProto.FLOAT, ["N"]
+                ),
+                onnx.helper.make_tensor_value_info(
+                    "in1", onnx.TensorProto.FLOAT, ["M"]
+                ),
+            ],
+            [
+                onnx.helper.make_tensor_value_info(
+                    "out0", onnx.TensorProto.FLOAT, ["N"]
+                ),
+                onnx.helper.make_tensor_value_info(
+                    "out1", onnx.TensorProto.FLOAT, ["M"]
+                ),
+            ],
+        )
+
+        node = onnx.helper.make_node(
+            "SequenceMap", inputs=["x0", "x1"], outputs=["y0", "y1"], body=body
+        )
+
+        x0 = [
+            np.random.uniform(0.0, 1.0, np.random.randint(1, 10)).astype(np.float32)
+            for _ in range(3)
+        ]
+        x1 = [
+            np.random.uniform(0.0, 1.0, np.random.randint(1, 10)).astype(np.float32)
+            for _ in range(3)
+        ]
+        y0 = x0
+        y1 = x1
+        input_type_protos = [
+            onnx.helper.make_sequence_type_proto(
+                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ["N"])
+            ),
+            onnx.helper.make_sequence_type_proto(
+                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ["M"])
+            ),
+        ]
+        output_type_protos = [
+            onnx.helper.make_sequence_type_proto(
+                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ["N"])
+            ),
+            onnx.helper.make_sequence_type_proto(
+                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ["M"])
+            ),
+        ]
+        expect(
+            node,
+            inputs=[x0, x1],
+            outputs=[y0, y1],
+            input_type_protos=input_type_protos,
+            output_type_protos=output_type_protos,
+            name="test_sequence_map_identity_2_sequences",
+        )
+
+    @staticmethod
+    def export_sequence_map_identity_1_sequence_1_tensor():  # type: () -> None
+        body = onnx.helper.make_graph(
+            [
+                onnx.helper.make_node("Identity", ["in0"], ["out0"]),
+                onnx.helper.make_node("Identity", ["in1"], ["out1"]),
+            ],
+            "seq_map_body",
+            [
+                onnx.helper.make_tensor_value_info(
+                    "in0", onnx.TensorProto.FLOAT, ["N"]
+                ),
+                onnx.helper.make_tensor_value_info(
+                    "in1", onnx.TensorProto.FLOAT, ["M"]
+                ),
+            ],
+            [
+                onnx.helper.make_tensor_value_info(
+                    "out0", onnx.TensorProto.FLOAT, ["N"]
+                ),
+                onnx.helper.make_tensor_value_info(
+                    "out1", onnx.TensorProto.FLOAT, ["M"]
+                ),
+            ],
+        )
+
+        node = onnx.helper.make_node(
+            "SequenceMap", inputs=["x0", "x1"], outputs=["y0", "y1"], body=body
+        )
+
+        x0 = [
+            np.random.uniform(0.0, 1.0, np.random.randint(1, 10)).astype(np.float32)
+            for _ in range(3)
+        ]
+        x1 = np.random.uniform(0.0, 1.0, np.random.randint(1, 10)).astype(np.float32)
+        y0 = x0
+        y1 = [x1 for _ in range(3)]
+        input_type_protos = [
+            onnx.helper.make_sequence_type_proto(
+                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ["N"])
+            ),
+            onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ["M"]),
+        ]
+        output_type_protos = [
+            onnx.helper.make_sequence_type_proto(
+                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ["N"])
+            ),
+            onnx.helper.make_sequence_type_proto(
+                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ["M"])
+            ),
+        ]
+        expect(
+            node,
+            inputs=[x0, x1],
+            outputs=[y0, y1],
+            input_type_protos=input_type_protos,
+            output_type_protos=output_type_protos,
+            name="test_sequence_map_identity_1_sequence_1_tensor",
+        )
+
+    @staticmethod
+    def export_sequence_map_add_2_sequences():  # type: () -> None
+        body = onnx.helper.make_graph(
+            [onnx.helper.make_node("Add", ["in0", "in1"], ["out0"])],
+            "seq_map_body",
+            [
+                onnx.helper.make_tensor_value_info(
+                    "in0", onnx.TensorProto.FLOAT, ["N"]
+                ),
+                onnx.helper.make_tensor_value_info(
+                    "in1", onnx.TensorProto.FLOAT, ["N"]
+                ),
+            ],
+            [onnx.helper.make_tensor_value_info("out0", onnx.TensorProto.FLOAT, ["N"])],
+        )
+
+        node = onnx.helper.make_node(
+            "SequenceMap", inputs=["x0", "x1"], outputs=["y0"], body=body
+        )
+
+        N = [np.random.randint(1, 10) for _ in range(3)]
+        x0 = [np.random.uniform(0.0, 1.0, N[k]).astype(np.float32) for k in range(3)]
+        x1 = [np.random.uniform(0.0, 1.0, N[k]).astype(np.float32) for k in range(3)]
+        y0 = [x0[k] + x1[k] for k in range(3)]
+        input_type_protos = [
+            onnx.helper.make_sequence_type_proto(
+                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ["N"])
+            ),
+            onnx.helper.make_sequence_type_proto(
+                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ["N"])
+            ),
+        ]
+        output_type_protos = [
+            onnx.helper.make_sequence_type_proto(
+                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ["N"])
+            ),
+        ]
+        expect(
+            node,
+            inputs=[x0, x1],
+            outputs=[y0],
+            input_type_protos=input_type_protos,
+            output_type_protos=output_type_protos,
+            name="test_sequence_map_add_2_sequences",
+        )
+
+    @staticmethod
+    def export_sequence_map_add_1_sequence_1_tensor():  # type: () -> None
+        body = onnx.helper.make_graph(
+            [onnx.helper.make_node("Add", ["in0", "in1"], ["out0"])],
+            "seq_map_body",
+            [
+                onnx.helper.make_tensor_value_info(
+                    "in0", onnx.TensorProto.FLOAT, ["N"]
+                ),
+                onnx.helper.make_tensor_value_info(
+                    "in1", onnx.TensorProto.FLOAT, ["N"]
+                ),
+            ],
+            [onnx.helper.make_tensor_value_info("out0", onnx.TensorProto.FLOAT, ["N"])],
+        )
+
+        node = onnx.helper.make_node(
+            "SequenceMap", inputs=["x0", "x1"], outputs=["y0"], body=body
+        )
+
+        x0 = [np.random.uniform(0.0, 1.0, 10).astype(np.float32) for k in range(3)]
+        x1 = np.random.uniform(0.0, 1.0, 10).astype(np.float32)
+        y0 = [x0[i] + x1 for i in range(3)]
+        input_type_protos = [
+            onnx.helper.make_sequence_type_proto(
+                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ["N"])
+            ),
+            onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ["N"]),
+        ]
+        output_type_protos = [
+            onnx.helper.make_sequence_type_proto(
+                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ["N"])
+            ),
+        ]
+        expect(
+            node,
+            inputs=[x0, x1],
+            outputs=[y0],
+            input_type_protos=input_type_protos,
+            output_type_protos=output_type_protos,
+            name="test_sequence_map_add_1_sequence_1_tensor",
+        )
+
+    @staticmethod
+    def export_sequence_map_extract_shapes():  # type: () -> None
+        body = onnx.helper.make_graph(
+            [onnx.helper.make_node("Shape", ["x"], ["shape"])],
+            "seq_map_body",
+            [
+                onnx.helper.make_tensor_value_info(
+                    "x", onnx.TensorProto.FLOAT, ["H", "W", "C"]
+                )
+            ],
+            [onnx.helper.make_tensor_value_info("shape", onnx.TensorProto.INT64, [3])],
+        )
+
+        node = onnx.helper.make_node(
+            "SequenceMap", inputs=["in_seq"], outputs=["shapes"], body=body
+        )
+
+        shapes = [
+            np.array([40, 30, 3], dtype=np.int64),
+            np.array([20, 10, 3], dtype=np.int64),
+            np.array([10, 5, 3], dtype=np.int64),
+        ]
+        x0 = [np.zeros(shape, dtype=np.float32) for shape in shapes]
+        input_type_protos = [
+            onnx.helper.make_sequence_type_proto(
+                onnx.helper.make_tensor_type_proto(
+                    onnx.TensorProto.FLOAT, ["H", "W", "C"]
+                )
+            ),
+        ]
+        output_type_protos = [
+            onnx.helper.make_sequence_type_proto(
+                onnx.helper.make_tensor_type_proto(onnx.TensorProto.INT64, [3])
+            ),
+        ]
+        expect(
+            node,
+            inputs=[x0],
+            outputs=[shapes],
+            input_type_protos=input_type_protos,
+            output_type_protos=output_type_protos,
+            name="test_sequence_map_extract_shapes",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sequenceinsert.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sequenceinsert.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac50aab17d89125c91288c44b6543cd9388137be
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sequenceinsert.py
@@ -0,0 +1,75 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from typing import Any
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def sequence_insert_reference_implementation(
+    sequence: list[Any], tensor: np.ndarray, position: np.ndarray = None
+) -> list[Any]:
+    # make a copy of input sequence
+    seq = list(sequence)
+    if position is not None:
+        # In these cases, insert_position will be between [-len(sequence), len(sequence)]
+        # The position argument will be in the format np.array([pos_index])
+        insert_position = position[0]
+        seq.insert(insert_position, tensor)
+    else:
+        # Default position of insertion is at the end of the sequence.
+        seq.append(tensor)
+    return seq
+
+
+class SequenceInsert(Base):
+    @staticmethod
+    def export() -> None:
+        test_cases = {
+            "at_back": [np.array([10, 11, 12]).astype(np.int64)],
+            "at_front": [np.array([-2, -1, 0]), np.array([0]).astype(np.int64)],
+        }
+        sequence = [
+            np.array([1, 2, 3, 4]).astype(np.int64),
+            np.array([5, 6, 7]).astype(np.int64),
+            np.array([8, 9]).astype(np.int64),
+        ]
+
+        for test_name, test_inputs in test_cases.items():
+            tensor = test_inputs[0].astype(np.int64)
+
+            if len(test_inputs) > 1:
+                node = onnx.helper.make_node(
+                    "SequenceInsert",
+                    inputs=["sequence", "tensor", "position"],
+                    outputs=["output_sequence"],
+                )
+                position = test_inputs[1]
+                inserted = sequence_insert_reference_implementation(
+                    sequence, tensor, position
+                )
+                expect(
+                    node,
+                    inputs=[sequence, tensor, position],
+                    outputs=[inserted],
+                    name="test_sequence_insert_" + test_name,
+                )
+            else:
+                node = onnx.helper.make_node(
+                    "SequenceInsert",
+                    inputs=["sequence", "tensor"],
+                    outputs=["output_sequence"],
+                )
+                inserted = sequence_insert_reference_implementation(sequence, tensor)
+                expect(
+                    node,
+                    inputs=[sequence, tensor],
+                    outputs=[inserted],
+                    name="test_sequence_insert_" + test_name,
+                )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/shape.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/shape.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bd79e28f1617be254faeb31d0c1155b76173a8f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/shape.py
@@ -0,0 +1,60 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+# Reference implementation of shape op
+def shape_reference_impl(x, start=None, end=None):  # type: ignore
+    dims = x.shape[start:end]
+    return np.array(dims).astype(np.int64)
+
+
+def test_shape(testname, xval, start=None, end=None):  # type: ignore
+    node = onnx.helper.make_node(
+        "Shape", inputs=["x"], outputs=["y"], start=start, end=end
+    )
+
+    yval = shape_reference_impl(xval, start, end)
+
+    expect(node, inputs=[xval], outputs=[yval], name="test_shape" + testname)
+
+
+class Shape(Base):
+    @staticmethod
+    def export() -> None:
+        x = np.array(
+            [
+                [1, 2, 3],
+                [4, 5, 6],
+            ]
+        ).astype(np.float32)
+        test_shape("_example", x)  # preserve names of original test cases
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+
+        test_shape("", x)  # preserve names of original test cases
+
+        test_shape("_start_1", x, start=1)
+
+        test_shape("_end_1", x, end=1)
+
+        test_shape("_start_negative_1", x, start=-1)
+
+        test_shape("_end_negative_1", x, end=-1)
+
+        test_shape("_start_1_end_negative_1", x, start=1, end=-1)
+
+        test_shape("_start_1_end_2", x, start=1, end=2)
+
+        test_shape("_clip_start", x, start=-10)
+
+        test_shape("_clip_end", x, end=10)
+
+        test_shape("_start_greater_than_end", x, start=2, end=1)
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/shrink.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/shrink.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d7627e16f84db996a35a052f97790ba2f4e3025
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/shrink.py
@@ -0,0 +1,37 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Shrink(Base):
+    @staticmethod
+    def export_hard_shrink() -> None:
+        node = onnx.helper.make_node(
+            "Shrink",
+            inputs=["x"],
+            outputs=["y"],
+            lambd=1.5,
+        )
+        X = np.arange(-2.0, 2.1, dtype=np.float32)
+        Y = np.array([-2, 0, 0, 0, 2], dtype=np.float32)
+        expect(node, inputs=[X], outputs=[Y], name="test_shrink_hard")
+
+    @staticmethod
+    def export_soft_shrink() -> None:
+        node = onnx.helper.make_node(
+            "Shrink",
+            inputs=["x"],
+            outputs=["y"],
+            lambd=1.5,
+            bias=1.5,
+        )
+        X = np.arange(-2.0, 2.1, dtype=np.float32)
+        Y = np.array([-0.5, 0, 0, 0, 0.5], dtype=np.float32)
+        expect(node, inputs=[X], outputs=[Y], name="test_shrink_soft")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sigmoid.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sigmoid.py
new file mode 100644
index 0000000000000000000000000000000000000000..1010b3ee1062a9097a1a5adb9a7177570459d4a4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sigmoid.py
@@ -0,0 +1,30 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Sigmoid(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Sigmoid",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.array([-1, 0, 1]).astype(np.float32)
+        y = 1.0 / (
+            1.0 + np.exp(np.negative(x))
+        )  # expected output [0.26894143, 0.5, 0.7310586]
+        expect(node, inputs=[x], outputs=[y], name="test_sigmoid_example")
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = 1.0 / (1.0 + np.exp(np.negative(x)))
+        expect(node, inputs=[x], outputs=[y], name="test_sigmoid")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sign.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sign.py
new file mode 100644
index 0000000000000000000000000000000000000000..8be10f4ae6de2e38739bf521c6c3123c7cdbb4d5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sign.py
@@ -0,0 +1,24 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Sign(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Sign",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.array(range(-5, 6)).astype(np.float32)
+        y = np.sign(x)
+        expect(node, inputs=[x], outputs=[y], name="test_sign")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sin.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sin.py
new file mode 100644
index 0000000000000000000000000000000000000000..5aed7fbd85d2fd32a0be4eb39ac73974313affe8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sin.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Sin(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Sin",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.array([-1, 0, 1]).astype(np.float32)
+        y = np.sin(x)
+        expect(node, inputs=[x], outputs=[y], name="test_sin_example")
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = np.sin(x)
+        expect(node, inputs=[x], outputs=[y], name="test_sin")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sinh.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sinh.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9b4b6738f471e3d7f9a19e7131fa8e9452580ee
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sinh.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Sinh(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Sinh",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.array([-1, 0, 1]).astype(np.float32)
+        y = np.sinh(x)  # expected output [-1.17520118,  0.,  1.17520118]
+        expect(node, inputs=[x], outputs=[y], name="test_sinh_example")
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = np.sinh(x)
+        expect(node, inputs=[x], outputs=[y], name="test_sinh")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/size.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/size.py
new file mode 100644
index 0000000000000000000000000000000000000000..f88387b4535f32e31e296f2f3937d6bbe864a63a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/size.py
@@ -0,0 +1,35 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Size(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Size",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.array(
+            [
+                [1, 2, 3],
+                [4, 5, 6],
+            ]
+        ).astype(np.float32)
+        y = np.array(6).astype(np.int64)
+
+        expect(node, inputs=[x], outputs=[y], name="test_size_example")
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = np.array(x.size).astype(np.int64)
+
+        expect(node, inputs=[x], outputs=[y], name="test_size")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/slice.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/slice.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a11e99229c2aba626662dfea5b52c6c5e9c59af
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/slice.py
@@ -0,0 +1,178 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Slice(Base):
+    @staticmethod
+    def export_slice() -> None:
+        node = onnx.helper.make_node(
+            "Slice",
+            inputs=["x", "starts", "ends", "axes", "steps"],
+            outputs=["y"],
+        )
+
+        x = np.random.randn(20, 10, 5).astype(np.float32)
+        y = x[0:3, 0:10]
+        starts = np.array([0, 0], dtype=np.int64)
+        ends = np.array([3, 10], dtype=np.int64)
+        axes = np.array([0, 1], dtype=np.int64)
+        steps = np.array([1, 1], dtype=np.int64)
+
+        expect(
+            node, inputs=[x, starts, ends, axes, steps], outputs=[y], name="test_slice"
+        )
+
+    @staticmethod
+    def export_slice_neg() -> None:
+        node = onnx.helper.make_node(
+            "Slice",
+            inputs=["x", "starts", "ends", "axes", "steps"],
+            outputs=["y"],
+        )
+
+        x = np.random.randn(20, 10, 5).astype(np.float32)
+        starts = np.array([0], dtype=np.int64)
+        ends = np.array([-1], dtype=np.int64)
+        axes = np.array([1], dtype=np.int64)
+        steps = np.array([1], dtype=np.int64)
+        y = x[:, 0:-1]
+
+        expect(
+            node,
+            inputs=[x, starts, ends, axes, steps],
+            outputs=[y],
+            name="test_slice_neg",
+        )
+
+    @staticmethod
+    def export_slice_start_out_of_bounds() -> None:
+        node = onnx.helper.make_node(
+            "Slice",
+            inputs=["x", "starts", "ends", "axes", "steps"],
+            outputs=["y"],
+        )
+
+        x = np.random.randn(20, 10, 5).astype(np.float32)
+        starts = np.array([1000], dtype=np.int64)
+        ends = np.array([1000], dtype=np.int64)
+        axes = np.array([1], dtype=np.int64)
+        steps = np.array([1], dtype=np.int64)
+        y = x[:, 1000:1000]
+
+        expect(
+            node,
+            inputs=[x, starts, ends, axes, steps],
+            outputs=[y],
+            name="test_slice_start_out_of_bounds",
+        )
+
+    @staticmethod
+    def export_slice_end_out_of_bounds() -> None:
+        node = onnx.helper.make_node(
+            "Slice",
+            inputs=["x", "starts", "ends", "axes", "steps"],
+            outputs=["y"],
+        )
+
+        x = np.random.randn(20, 10, 5).astype(np.float32)
+        starts = np.array([1], dtype=np.int64)
+        ends = np.array([1000], dtype=np.int64)
+        axes = np.array([1], dtype=np.int64)
+        steps = np.array([1], dtype=np.int64)
+        y = x[:, 1:1000]
+
+        expect(
+            node,
+            inputs=[x, starts, ends, axes, steps],
+            outputs=[y],
+            name="test_slice_end_out_of_bounds",
+        )
+
+    @staticmethod
+    def export_slice_default_axes() -> None:
+        node = onnx.helper.make_node(
+            "Slice",
+            inputs=["x", "starts", "ends"],
+            outputs=["y"],
+        )
+
+        x = np.random.randn(20, 10, 5).astype(np.float32)
+        starts = np.array([0, 0, 3], dtype=np.int64)
+        ends = np.array([20, 10, 4], dtype=np.int64)
+        y = x[:, :, 3:4]
+
+        expect(
+            node, inputs=[x, starts, ends], outputs=[y], name="test_slice_default_axes"
+        )
+
+    @staticmethod
+    def export_slice_default_steps() -> None:
+        node = onnx.helper.make_node(
+            "Slice",
+            inputs=["x", "starts", "ends", "axes"],
+            outputs=["y"],
+        )
+
+        x = np.random.randn(20, 10, 5).astype(np.float32)
+        starts = np.array([0, 0, 3], dtype=np.int64)
+        ends = np.array([20, 10, 4], dtype=np.int64)
+        axes = np.array([0, 1, 2], dtype=np.int64)
+        y = x[:, :, 3:4]
+
+        expect(
+            node,
+            inputs=[x, starts, ends, axes],
+            outputs=[y],
+            name="test_slice_default_steps",
+        )
+
+    @staticmethod
+    def export_slice_neg_steps() -> None:
+        node = onnx.helper.make_node(
+            "Slice",
+            inputs=["x", "starts", "ends", "axes", "steps"],
+            outputs=["y"],
+        )
+
+        x = np.random.randn(20, 10, 5).astype(np.float32)
+        starts = np.array([20, 10, 4], dtype=np.int64)
+        ends = np.array([0, 0, 1], dtype=np.int64)
+        axes = np.array([0, 1, 2], dtype=np.int64)
+        steps = np.array([-1, -3, -2]).astype(np.int64)
+        y = x[20:0:-1, 10:0:-3, 4:1:-2]
+
+        expect(
+            node,
+            inputs=[x, starts, ends, axes, steps],
+            outputs=[y],
+            name="test_slice_neg_steps",
+        )
+
+    @staticmethod
+    def export_slice_negative_axes() -> None:
+        node = onnx.helper.make_node(
+            "Slice",
+            inputs=["x", "starts", "ends", "axes"],
+            outputs=["y"],
+        )
+
+        x = np.random.randn(20, 10, 5).astype(np.float32)
+        starts = np.array([0, 0, 3], dtype=np.int64)
+        ends = np.array([20, 10, 4], dtype=np.int64)
+        axes = np.array([0, -2, -1], dtype=np.int64)
+        y = x[:, :, 3:4]
+
+        expect(
+            node,
+            inputs=[x, starts, ends, axes],
+            outputs=[y],
+            name="test_slice_negative_axes",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/softmax.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/softmax.py
new file mode 100644
index 0000000000000000000000000000000000000000..e01efba8e3029326aff7e133a8e4839f769ae847
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/softmax.py
@@ -0,0 +1,91 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def softmax(x: np.ndarray, axis: int = -1) -> np.ndarray:
+    x_max = np.max(x, axis=axis, keepdims=True)
+    tmp = np.exp(x - x_max)
+    s = np.sum(tmp, axis=axis, keepdims=True)
+    return tmp / s
+
+
+class Softmax(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Softmax",
+            inputs=["x"],
+            outputs=["y"],
+        )
+        x = np.array([[-1, 0, 1]]).astype(np.float32)
+        # expected output [[0.09003058, 0.24472848, 0.66524094]]
+        y = softmax(x, axis=1)
+        expect(node, inputs=[x], outputs=[y], name="test_softmax_example")
+
+    @staticmethod
+    def export_softmax_axis() -> None:
+        x = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]).astype(np.float32)
+        # expected output
+        # [[0.032058604 0.08714432  0.23688284  0.6439143  ]
+        # [0.032058604 0.08714432  0.23688284  0.6439143  ]]
+        y = softmax(x)
+
+        node = onnx.helper.make_node(
+            "Softmax",
+            inputs=["x"],
+            outputs=["y"],
+        )
+        expect(node, inputs=[x], outputs=[y], name="test_softmax_large_number")
+
+        x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
+        node = onnx.helper.make_node(
+            "Softmax",
+            inputs=["x"],
+            outputs=["y"],
+            axis=0,
+        )
+        y = softmax(x, axis=0)
+        expect(node, inputs=[x], outputs=[y], name="test_softmax_axis_0")
+
+        node = onnx.helper.make_node(
+            "Softmax",
+            inputs=["x"],
+            outputs=["y"],
+            axis=1,
+        )
+        y = softmax(x, axis=1)
+        expect(node, inputs=[x], outputs=[y], name="test_softmax_axis_1")
+
+        node = onnx.helper.make_node(
+            "Softmax",
+            inputs=["x"],
+            outputs=["y"],
+            axis=2,
+        )
+        y = softmax(x, axis=2)
+        expect(node, inputs=[x], outputs=[y], name="test_softmax_axis_2")
+
+        node = onnx.helper.make_node(
+            "Softmax",
+            inputs=["x"],
+            outputs=["y"],
+            axis=-1,
+        )
+        y = softmax(x, axis=-1)
+        expect(node, inputs=[x], outputs=[y], name="test_softmax_negative_axis")
+
+        # default axis is -1
+        node = onnx.helper.make_node(
+            "Softmax",
+            inputs=["x"],
+            outputs=["y"],
+        )
+        expect(node, inputs=[x], outputs=[y], name="test_softmax_default_axis")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/softmaxcrossentropy.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/softmaxcrossentropy.py
new file mode 100644
index 0000000000000000000000000000000000000000..aeeaded119fbcf912f6e3c052ebc8fb212f2b105
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/softmaxcrossentropy.py
@@ -0,0 +1,1152 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def softmaxcrossentropy(
+    x, target, weight=None, reduction="mean", ignore_index=None, get_log_prob=None
+):  # type: ignore
+    input_shape = x.shape
+    if len(input_shape) == 1:
+        raise RuntimeError("Unsupported shape")
+
+    target_shape = target.shape
+    N = input_shape[0]
+    C = input_shape[1]
+
+    # compute log_softmax
+    max_x = np.max(x, axis=1, keepdims=True)
+    exp_x = np.exp(x - max_x)
+    p = exp_x / np.sum(exp_x, axis=1, keepdims=True)
+    inp = np.log(p)
+    log_prob = None
+    if get_log_prob is True:
+        log_prob = np.copy(inp)
+
+    # initialize the positional weights when required
+    gather_weight = None
+    if weight is not None:
+        # setting mode='clip' to deal with ignore_index > C or < 0 cases.
+        # when the target value is > C or < 0, it doesn't matter which value we are
+        # taking in gather_weight, since it will be set to 0 in the following if-block
+        # use np.int32 to make it compatible with x86 machines
+        gather_weight = np.take(weight, np.array(target, dtype=np.int32), mode="clip")
+        # set `ignore_index`'s loss weight to 0.
+        # The loss tensor will be multiplied by this weight tensor,
+        # so `ingore_index`'s loss value will be eliminated.
+        if ignore_index is not None:
+            gather_weight = np.where(target == ignore_index, 0, gather_weight).astype(
+                dtype=np.float32
+            )
+    elif ignore_index is not None:
+        gather_weight = np.where(target == ignore_index, 0, 1).astype(dtype=np.float32)
+
+    # if input is 4-d and above, make it 3-d
+    if len(input_shape) != 3:
+        inp = inp.reshape((N, C, -1))
+        target = target.reshape((N, -1))
+
+    # Get a dimension from the reshaped input.
+    # If the original input shape is [N, C, H, W],
+    # the D here should be H * W because we reshape
+    # [N, C, H, W] to [N, C, H * W].
+    D = inp.shape[2]
+    neg_gather_element_input = np.zeros((N, D), dtype=np.float32)
+    for i in range(N):
+        for d in range(D):
+            if target[i][d] != ignore_index:
+                neg_gather_element_input[i][d] = -inp[i][target[i][d]][d]
+
+    loss = neg_gather_element_input
+
+    # if the input was 4-d or above reshape to the right shape
+    if len(input_shape) != 3:
+        loss = loss.reshape(target_shape)
+
+    # apply the weights when required
+    if gather_weight is not None:
+        loss = gather_weight * loss
+        if reduction == "mean":
+            loss = loss.sum() / gather_weight.sum()
+            if get_log_prob is True:
+                return loss, log_prob
+            else:
+                return loss
+
+    if reduction == "mean":
+        loss = np.mean(loss)
+    elif reduction == "sum":
+        loss = np.sum(loss)
+
+    if get_log_prob:
+        return loss, log_prob
+    return loss
+
+
+class SoftmaxCrossEntropyLoss(Base):
+    @staticmethod
+    def export_softmaxcrossentropy_none() -> None:
+        # Define operator attributes.
+        reduction = "none"
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z"],
+            reduction=reduction,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)
+
+        # Compute SoftmaxCrossEntropyLoss
+        sce = softmaxcrossentropy(x, labels, reduction="none")
+
+        # Check results
+        expect(node, inputs=[x, labels], outputs=[sce], name="test_sce_none")
+
+    @staticmethod
+    def export_softmaxcrossentropy_none_log_prob() -> None:
+        # Define operator attributes.
+        reduction = "none"
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z", "log_prob"],
+            reduction=reduction,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)
+
+        # Compute SoftmaxCrossEntropyLoss
+        loss, log_prob = softmaxcrossentropy(
+            x, labels, reduction="none", get_log_prob=True
+        )
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels],
+            outputs=[loss, log_prob],
+            name="test_sce_none_log_prob",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_none_weights() -> None:
+        # Define operator attributes.
+        reduction = "none"
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y", "w"],
+            outputs=["z"],
+            reduction=reduction,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)
+        weights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)
+
+        # Compute SoftmaxCrossEntropyLoss
+        sce = softmaxcrossentropy(x, labels, weight=weights, reduction="none")
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels, weights],
+            outputs=[sce],
+            name="test_sce_none_weights",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_none_weights_log_prob() -> None:
+        # Define operator attributes.
+        reduction = "none"
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y", "w"],
+            outputs=["z", "log_prob"],
+            reduction=reduction,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)
+        weights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)
+
+        # Compute SoftmaxCrossEntropyLoss
+        loss, log_prob = softmaxcrossentropy(
+            x, labels, weight=weights, reduction="none", get_log_prob=True
+        )
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels, weights],
+            outputs=[loss, log_prob],
+            name="test_sce_none_weights_log_prob",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_sum() -> None:
+        # Define operator attributes.
+        reduction = "sum"
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z"],
+            reduction=reduction,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)
+
+        # Compute SoftmaxCrossEntropyLoss
+        sce = softmaxcrossentropy(x, labels, reduction="sum")
+
+        # Check results
+        expect(node, inputs=[x, labels], outputs=[sce], name="test_sce_sum")
+
+    @staticmethod
+    def export_softmaxcrossentropy_sum_log_prob() -> None:
+        # Define operator attributes.
+        reduction = "sum"
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z", "log_prob"],
+            reduction=reduction,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)
+
+        # Compute SoftmaxCrossEntropyLoss
+        loss, log_prob = softmaxcrossentropy(
+            x, labels, reduction="sum", get_log_prob=True
+        )
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels],
+            outputs=[loss, log_prob],
+            name="test_sce_sum_log_prob",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z"],
+            reduction=reduction,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)
+
+        # Compute SoftmaxCrossEntropyLoss
+        sce = softmaxcrossentropy(x, labels)
+
+        # Check results
+        expect(node, inputs=[x, labels], outputs=[sce], name="test_sce_mean")
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean_log_prob() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z", "log_prob"],
+            reduction=reduction,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)
+
+        # Compute SoftmaxCrossEntropyLoss
+        loss, log_prob = softmaxcrossentropy(x, labels, get_log_prob=True)
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels],
+            outputs=[loss, log_prob],
+            name="test_sce_mean_log_prob",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean_3d() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z"],
+            reduction=reduction,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5, 2).astype(np.float32)
+        y = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)
+
+        # Compute SoftmaxCrossEntropyLoss
+        sce = softmaxcrossentropy(x, y)
+
+        # Check results
+        expect(node, inputs=[x, y], outputs=[sce], name="test_sce_mean_3d")
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean_3d_log_prob() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z", "log_prob"],
+            reduction=reduction,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5, 2).astype(np.float32)
+        y = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)
+
+        # Compute SoftmaxCrossEntropyLoss
+        loss, log_prob = softmaxcrossentropy(x, y, get_log_prob=True)
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, y],
+            outputs=[loss, log_prob],
+            name="test_sce_mean_3d_log_prob",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean_weights() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y", "w"],
+            outputs=["z"],
+            reduction=reduction,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)
+        weights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)
+
+        # Compute SoftmaxCrossEntropyLoss
+        sce = softmaxcrossentropy(x, labels, weight=weights)
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels, weights],
+            outputs=[sce],
+            name="test_sce_mean_weight",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean_weights_log_prob() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y", "w"],
+            outputs=["z", "log_prob"],
+            reduction=reduction,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)
+        weights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)
+
+        # Compute SoftmaxCrossEntropyLoss
+        loss, log_prob = softmaxcrossentropy(
+            x, labels, weight=weights, get_log_prob=True
+        )
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels, weights],
+            outputs=[loss, log_prob],
+            name="test_sce_mean_weight_log_prob",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean_weights_ii() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+        ignore_index = np.int64(0)
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y", "w"],
+            outputs=["z"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)
+        labels[0] = np.int64(0)
+        weights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)
+
+        # Compute SoftmaxCrossEntropyLoss
+        sce = softmaxcrossentropy(x, labels, weight=weights, ignore_index=ignore_index)
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels, weights],
+            outputs=[sce],
+            name="test_sce_mean_weight_ii",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean_weights_ii_log_prob() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+        ignore_index = np.int64(0)
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y", "w"],
+            outputs=["z", "log_prob"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)
+        labels[0] = np.int64(0)
+        weights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)
+
+        # Compute SoftmaxCrossEntropyLoss
+        loss, log_prob = softmaxcrossentropy(
+            x, labels, weight=weights, ignore_index=ignore_index, get_log_prob=True
+        )
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels, weights],
+            outputs=[loss, log_prob],
+            name="test_sce_mean_weight_ii_log_prob",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean_no_weights_ii() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+        ignore_index = np.int64(2)
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)
+        labels[0] = np.int64(2)
+
+        # Compute SoftmaxCrossEntropyLoss
+        sce = softmaxcrossentropy(x, labels, ignore_index=ignore_index)
+
+        # Check results
+        expect(
+            node, inputs=[x, labels], outputs=[sce], name="test_sce_mean_no_weight_ii"
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean_no_weights_ii_log_prob() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+        ignore_index = np.int64(2)
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z", "log_prob"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3,)).astype(np.int64)
+        labels[0] = np.int64(2)
+
+        # Compute SoftmaxCrossEntropyLoss
+        loss, log_prob = softmaxcrossentropy(
+            x, labels, ignore_index=ignore_index, get_log_prob=True
+        )
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels],
+            outputs=[loss, log_prob],
+            name="test_sce_mean_no_weight_ii_log_prob",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean_weights_ii_3d() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+        ignore_index = np.int64(1)
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y", "w"],
+            outputs=["z"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5, 2).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)
+        labels[0][0] = np.int64(1)
+        weights = np.array([0.2, 0.3, 0.6, 0.1, 0.5], dtype=np.float32)
+
+        # Compute SoftmaxCrossEntropyLoss
+        sce = softmaxcrossentropy(x, labels, weight=weights, ignore_index=ignore_index)
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels, weights],
+            outputs=[sce],
+            name="test_sce_mean_weight_ii_3d",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean_weights_ii_3d_log_prob() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+        ignore_index = np.int64(1)
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y", "w"],
+            outputs=["z", "log_prob"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5, 2).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)
+        labels[0][0] = np.int64(1)
+        weights = np.array([0.2, 0.3, 0.6, 0.1, 0.5], dtype=np.float32)
+
+        # Compute SoftmaxCrossEntropyLoss
+        loss, log_prob = softmaxcrossentropy(
+            x, labels, weight=weights, ignore_index=ignore_index, get_log_prob=True
+        )
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels, weights],
+            outputs=[loss, log_prob],
+            name="test_sce_mean_weight_ii_3d_log_prob",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean_no_weights_ii_3d() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+        ignore_index = np.int64(2)
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5, 2).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)
+        labels[0][0] = np.int64(2)
+
+        # Compute SoftmaxCrossEntropyLoss
+        sce = softmaxcrossentropy(x, labels, ignore_index=ignore_index)
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels],
+            outputs=[sce],
+            name="test_sce_mean_no_weight_ii_3d",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean_no_weights_ii_3d_log_prob() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+        ignore_index = np.int64(2)
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z", "log_prob"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5, 2).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3, 2)).astype(np.int64)
+        labels[0][0] = np.int64(2)
+
+        # Compute SoftmaxCrossEntropyLoss
+        loss, log_prob = softmaxcrossentropy(
+            x, labels, ignore_index=ignore_index, get_log_prob=True
+        )
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels],
+            outputs=[loss, log_prob],
+            name="test_sce_mean_no_weight_ii_3d_log_prob",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean_weights_ii_4d() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+        ignore_index = np.int64(2)
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y", "w"],
+            outputs=["z"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5, 2, 7).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3, 2, 7)).astype(np.int64)
+        labels[0][0][0] = np.int64(2)
+        weights = np.array([0.2, 0.3, 0.6, 0.1, 0.5], dtype=np.float32)
+
+        # Compute SoftmaxCrossEntropyLoss
+        sce = softmaxcrossentropy(
+            x, labels, reduction=reduction, weight=weights, ignore_index=ignore_index
+        )
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels, weights],
+            outputs=[sce],
+            name="test_sce_mean_weight_ii_4d",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean_weights_ii_4d_log_prob() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+        ignore_index = np.int64(2)
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y", "w"],
+            outputs=["z", "log_prob"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5, 2, 7).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3, 2, 7)).astype(np.int64)
+        labels[0][0][0] = np.int64(2)
+        weights = np.array([0.2, 0.3, 0.6, 0.1, 0.5], dtype=np.float32)
+
+        # Compute SoftmaxCrossEntropyLoss
+        loss, log_prob = softmaxcrossentropy(
+            x,
+            labels,
+            reduction=reduction,
+            weight=weights,
+            ignore_index=ignore_index,
+            get_log_prob=True,
+        )
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels, weights],
+            outputs=[loss, log_prob],
+            name="test_sce_mean_weight_ii_4d_log_prob",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean_no_weights_ii_4d() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+        ignore_index = np.int64(2)
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5, 2, 7).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3, 2, 7)).astype(np.int64)
+        labels[0][0][0] = np.int64(2)
+
+        # Compute SoftmaxCrossEntropyLoss
+        sce = softmaxcrossentropy(
+            x, labels, reduction=reduction, ignore_index=ignore_index
+        )
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels],
+            outputs=[sce],
+            name="test_sce_mean_no_weight_ii_4d",
+        )
+
+    @staticmethod
+    def export_softmaxcrossentropy_mean_no_weights_ii_4d_log_prob() -> None:
+        # Define operator attributes.
+        reduction = "mean"
+        ignore_index = np.int64(2)
+
+        # Create operator.
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z", "log_prob"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        # Define operator inputs.
+        np.random.seed(0)
+        x = np.random.rand(3, 5, 2, 7).astype(np.float32)
+        labels = np.random.randint(0, high=5, size=(3, 2, 7)).astype(np.int64)
+        labels[0][0][0] = np.int64(2)
+
+        # Compute SoftmaxCrossEntropyLoss
+        loss, log_prob = softmaxcrossentropy(
+            x, labels, reduction=reduction, ignore_index=ignore_index, get_log_prob=True
+        )
+
+        # Check results
+        expect(
+            node,
+            inputs=[x, labels],
+            outputs=[loss, log_prob],
+            name="test_sce_mean_no_weight_ii_4d_log_prob",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2d3d4d5_mean_weight() -> None:
+        reduction = "mean"
+
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y", "w"],
+            outputs=["z"],
+            reduction=reduction,
+        )
+
+        N, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4
+        np.random.seed(0)
+        x = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)
+        labels = np.random.randint(
+            0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)
+        ).astype(np.int64)
+        weight = np.random.rand(C).astype(np.float32)
+
+        sce = softmaxcrossentropy(x, labels, weight=weight, reduction=reduction)
+
+        expect(
+            node,
+            inputs=[x, labels, weight],
+            outputs=[sce],
+            name="test_sce_NCd1d2d3d4d5_mean_weight",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2d3d4d5_mean_weight_log_prob() -> None:
+        reduction = "mean"
+
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y", "w"],
+            outputs=["z", "log_prob"],
+            reduction=reduction,
+        )
+
+        N, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4
+        np.random.seed(0)
+        x = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)
+        labels = np.random.randint(
+            0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)
+        ).astype(np.int64)
+        weight = np.random.rand(C).astype(np.float32)
+
+        loss, log_prob = softmaxcrossentropy(
+            x, labels, weight=weight, reduction=reduction, get_log_prob=True
+        )
+
+        expect(
+            node,
+            inputs=[x, labels, weight],
+            outputs=[loss, log_prob],
+            name="test_sce_NCd1d2d3d4d5_mean_weight_log_prob",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2d3d4d5_none_no_weight() -> None:
+        reduction = "none"
+
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z"],
+            reduction=reduction,
+        )
+
+        N, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4
+        np.random.seed(0)
+        x = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)
+        labels = np.random.randint(
+            0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)
+        ).astype(np.int64)
+
+        sce = softmaxcrossentropy(x, labels, reduction=reduction)
+
+        expect(
+            node,
+            inputs=[x, labels],
+            outputs=[sce],
+            name="test_sce_NCd1d2d3d4d5_none_no_weight",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2d3d4d5_none_no_weight_log_prob() -> None:
+        reduction = "none"
+
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z", "log_prob"],
+            reduction=reduction,
+        )
+
+        N, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4
+        np.random.seed(0)
+        x = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)
+        labels = np.random.randint(
+            0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5)
+        ).astype(np.int64)
+
+        loss, log_prob = softmaxcrossentropy(
+            x, labels, reduction=reduction, get_log_prob=True
+        )
+
+        expect(
+            node,
+            inputs=[x, labels],
+            outputs=[loss, log_prob],
+            name="test_sce_NCd1d2d3d4d5_none_no_weight_log_prob",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1_mean_weight_negative_ii() -> None:
+        reduction = "mean"
+        ignore_index = np.int64(-1)
+
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y", "w"],
+            outputs=["z"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        N, C, dim1 = 3, 5, 6
+        np.random.seed(0)
+        x = np.random.rand(N, C, dim1).astype(np.float32)
+        labels = np.random.randint(0, high=C, size=(N, dim1)).astype(np.int64)
+        labels[0][0] = -1
+        weight = np.random.rand(C).astype(np.float32)
+
+        sce = softmaxcrossentropy(
+            x, labels, weight=weight, reduction=reduction, ignore_index=ignore_index
+        )
+
+        expect(
+            node,
+            inputs=[x, labels, weight],
+            outputs=[sce],
+            name="test_sce_NCd1_mean_weight_negative_ii",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1_mean_weight_negative_ii_log_prob() -> None:
+        reduction = "mean"
+        ignore_index = np.int64(-1)
+
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y", "w"],
+            outputs=["z", "log_prob"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        N, C, dim1 = 3, 5, 6
+        np.random.seed(0)
+        x = np.random.rand(N, C, dim1).astype(np.float32)
+        labels = np.random.randint(0, high=C, size=(N, dim1)).astype(np.int64)
+        labels[0][0] = -1
+        weight = np.random.rand(C).astype(np.float32)
+
+        loss, log_prob = softmaxcrossentropy(
+            x,
+            labels,
+            weight=weight,
+            reduction=reduction,
+            ignore_index=ignore_index,
+            get_log_prob=True,
+        )
+
+        expect(
+            node,
+            inputs=[x, labels, weight],
+            outputs=[loss, log_prob],
+            name="test_sce_NCd1_mean_weight_negative_ii_log_prob",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2d3_none_no_weight_negative_ii() -> None:
+        reduction = "none"
+        ignore_index = np.int64(-5)
+
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        N, C, dim1, dim2, dim3 = 3, 5, 6, 6, 5
+        np.random.seed(0)
+        x = np.random.rand(N, C, dim1, dim2, dim3).astype(np.float32)
+        labels = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3)).astype(
+            np.int64
+        )
+        labels[0][0][0][0] = -5
+
+        sce = softmaxcrossentropy(
+            x, labels, reduction=reduction, ignore_index=ignore_index
+        )
+
+        expect(
+            node,
+            inputs=[x, labels],
+            outputs=[sce],
+            name="test_sce_NCd1d2d3_none_no_weight_negative_ii",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2d3_none_no_weight_negative_ii_log_prob() -> None:
+        reduction = "none"
+        ignore_index = np.int64(-5)
+
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y"],
+            outputs=["z", "log_prob"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        N, C, dim1, dim2, dim3 = 3, 5, 6, 6, 5
+        np.random.seed(0)
+        x = np.random.rand(N, C, dim1, dim2, dim3).astype(np.float32)
+        labels = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3)).astype(
+            np.int64
+        )
+        labels[0][0][0][0] = -5
+
+        loss, log_prob = softmaxcrossentropy(
+            x, labels, reduction=reduction, ignore_index=ignore_index, get_log_prob=True
+        )
+
+        expect(
+            node,
+            inputs=[x, labels],
+            outputs=[loss, log_prob],
+            name="test_sce_NCd1d2d3_none_no_weight_negative_ii_log_prob",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2d3_sum_weight_high_ii() -> None:
+        reduction = "sum"
+        ignore_index = np.int64(10)
+
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y", "w"],
+            outputs=["z"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        N, C = 3, 5
+        np.random.seed(0)
+        x = np.random.rand(N, C).astype(np.float32)
+        labels = np.random.randint(0, high=C, size=(N)).astype(np.int64)
+        labels[0] = 10
+        weight = np.random.rand(C).astype(np.float32)
+
+        sce = softmaxcrossentropy(
+            x, labels, weight=weight, reduction=reduction, ignore_index=ignore_index
+        )
+
+        expect(
+            node,
+            inputs=[x, labels, weight],
+            outputs=[sce],
+            name="test_sce_NCd1d2d3_sum_weight_high_ii",
+        )
+
+    @staticmethod
+    def export_input_shape_is_NCd1d2d3_sum_weight_high_ii_log_prob() -> None:
+        reduction = "sum"
+        ignore_index = np.int64(10)
+
+        node = onnx.helper.make_node(
+            "SoftmaxCrossEntropyLoss",
+            inputs=["x", "y", "w"],
+            outputs=["z", "log_prob"],
+            reduction=reduction,
+            ignore_index=ignore_index,
+        )
+
+        N, C = 3, 5
+        np.random.seed(0)
+        x = np.random.rand(N, C).astype(np.float32)
+        labels = np.random.randint(0, high=C, size=(N)).astype(np.int64)
+        labels[0] = 10
+        weight = np.random.rand(C).astype(np.float32)
+
+        loss, log_prob = softmaxcrossentropy(
+            x,
+            labels,
+            weight=weight,
+            reduction=reduction,
+            ignore_index=ignore_index,
+            get_log_prob=True,
+        )
+
+        expect(
+            node,
+            inputs=[x, labels, weight],
+            outputs=[loss, log_prob],
+            name="test_sce_NCd1d2d3_sum_weight_high_ii_log_prob",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/softplus.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/softplus.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4581c0f2a4520bc7988604ae88aab6ed5c3403e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/softplus.py
@@ -0,0 +1,30 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Softplus(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Softplus",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.array([-1, 0, 1]).astype(np.float32)
+        y = np.log(
+            np.exp(x) + 1
+        )  # expected output [0.31326166, 0.69314718, 1.31326163]
+        expect(node, inputs=[x], outputs=[y], name="test_softplus_example")
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = np.log(np.exp(x) + 1)
+        expect(node, inputs=[x], outputs=[y], name="test_softplus")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/softsign.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/softsign.py
new file mode 100644
index 0000000000000000000000000000000000000000..759ceb6f039c41f6f6811082807ef1c463255f45
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/softsign.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Softsign(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Softsign",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.array([-1, 0, 1]).astype(np.float32)
+        y = np.array([-0.5, 0, 0.5]).astype(np.float32)
+        expect(node, inputs=[x], outputs=[y], name="test_softsign_example")
+
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        y = x / (1 + np.abs(x))
+        expect(node, inputs=[x], outputs=[y], name="test_softsign")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/spacetodepth.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/spacetodepth.py
new file mode 100644
index 0000000000000000000000000000000000000000..f74a6e5a8c04dca06828a4a8ab1cd399181e9ff2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/spacetodepth.py
@@ -0,0 +1,66 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class SpaceToDepth(Base):
+    @staticmethod
+    def export() -> None:
+        b, c, h, w = shape = (2, 2, 6, 6)
+        blocksize = 2
+        node = onnx.helper.make_node(
+            "SpaceToDepth",
+            inputs=["x"],
+            outputs=["y"],
+            blocksize=blocksize,
+        )
+        x = np.random.random_sample(shape).astype(np.float32)
+        tmp = np.reshape(
+            x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize]
+        )
+        tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
+        y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
+        expect(node, inputs=[x], outputs=[y], name="test_spacetodepth")
+
+    @staticmethod
+    def export_example() -> None:
+        node = onnx.helper.make_node(
+            "SpaceToDepth",
+            inputs=["x"],
+            outputs=["y"],
+            blocksize=2,
+        )
+
+        # (1, 1, 4, 6) input tensor
+        x = np.array(
+            [
+                [
+                    [
+                        [0, 6, 1, 7, 2, 8],
+                        [12, 18, 13, 19, 14, 20],
+                        [3, 9, 4, 10, 5, 11],
+                        [15, 21, 16, 22, 17, 23],
+                    ]
+                ]
+            ]
+        ).astype(np.float32)
+
+        # (1, 4, 2, 3) output tensor
+        y = np.array(
+            [
+                [
+                    [[0, 1, 2], [3, 4, 5]],
+                    [[6, 7, 8], [9, 10, 11]],
+                    [[12, 13, 14], [15, 16, 17]],
+                    [[18, 19, 20], [21, 22, 23]],
+                ]
+            ]
+        ).astype(np.float32)
+        expect(node, inputs=[x], outputs=[y], name="test_spacetodepth_example")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/split.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/split.py
new file mode 100644
index 0000000000000000000000000000000000000000..99587806c70827d3ab04fbc14baf4cd77963246a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/split.py
@@ -0,0 +1,378 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Split(Base):
+    @staticmethod
+    def export_1d_opset13() -> None:
+        node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)
+
+        node = onnx.helper.make_node(
+            "Split",
+            inputs=["input"],
+            outputs=["output_1", "output_2", "output_3"],
+            axis=0,
+        )
+
+        expected_outputs = [
+            np.array([1.0, 2.0]).astype(np.float32),
+            np.array([3.0, 4.0]).astype(np.float32),
+            np.array([5.0, 6.0]).astype(np.float32),
+        ]
+        expect(
+            node,
+            inputs=[node_input],
+            outputs=expected_outputs,
+            name="test_split_equal_parts_1d_opset13",
+            opset_imports=[onnx.helper.make_opsetid("", 13)],
+        )
+
+        split = np.array([2, 4]).astype(np.int64)
+        node = onnx.helper.make_node(
+            "Split",
+            inputs=["input", "split"],
+            outputs=["output_1", "output_2"],
+            axis=0,
+        )
+
+        expected_outputs = [
+            np.array([1.0, 2.0]).astype(np.float32),
+            np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),
+        ]
+        expect(
+            node,
+            inputs=[node_input, split],
+            outputs=expected_outputs,
+            name="test_split_variable_parts_1d_opset13",
+            opset_imports=[onnx.helper.make_opsetid("", 13)],
+        )
+
+    @staticmethod
+    def export_2d_opset13() -> None:
+        node_input = np.array(
+            [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [7.0, 8.0, 9.0, 10.0, 11.0, 12.0]]
+        ).astype(np.float32)
+
+        node = onnx.helper.make_node(
+            "Split", inputs=["input"], outputs=["output_1", "output_2"], axis=1
+        )
+
+        expected_outputs = [
+            np.array([[1.0, 2.0, 3.0], [7.0, 8.0, 9.0]]).astype(np.float32),
+            np.array([[4.0, 5.0, 6.0], [10.0, 11.0, 12.0]]).astype(np.float32),
+        ]
+
+        expect(
+            node,
+            inputs=[node_input],
+            outputs=expected_outputs,
+            name="test_split_equal_parts_2d_opset13",
+            opset_imports=[onnx.helper.make_opsetid("", 13)],
+        )
+
+        split = np.array([2, 4]).astype(np.int64)
+        node = onnx.helper.make_node(
+            "Split",
+            inputs=["input", "split"],
+            outputs=["output_1", "output_2"],
+            axis=1,
+        )
+
+        expected_outputs = [
+            np.array([[1.0, 2.0], [7.0, 8.0]]).astype(np.float32),
+            np.array([[3.0, 4.0, 5.0, 6.0], [9.0, 10.0, 11.0, 12.0]]).astype(
+                np.float32
+            ),
+        ]
+
+        expect(
+            node,
+            inputs=[node_input, split],
+            outputs=expected_outputs,
+            name="test_split_variable_parts_2d_opset13",
+            opset_imports=[onnx.helper.make_opsetid("", 13)],
+        )
+
+    @staticmethod
+    def export_default_values_opset13() -> None:
+        node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)
+
+        # If axis is not specified, split is applied on default axis 0
+        node = onnx.helper.make_node(
+            "Split", inputs=["input"], outputs=["output_1", "output_2", "output_3"]
+        )
+
+        expected_outputs = [
+            np.array([1.0, 2.0]).astype(np.float32),
+            np.array([3.0, 4.0]).astype(np.float32),
+            np.array([5.0, 6.0]).astype(np.float32),
+        ]
+        expect(
+            node,
+            inputs=[node_input],
+            outputs=expected_outputs,
+            name="test_split_equal_parts_default_axis_opset13",
+            opset_imports=[onnx.helper.make_opsetid("", 13)],
+        )
+
+        split = np.array([2, 4]).astype(np.int64)
+        node = onnx.helper.make_node(
+            "Split", inputs=["input", "split"], outputs=["output_1", "output_2"]
+        )
+
+        expected_outputs = [
+            np.array([1.0, 2.0]).astype(np.float32),
+            np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),
+        ]
+        expect(
+            node,
+            inputs=[node_input, split],
+            outputs=expected_outputs,
+            name="test_split_variable_parts_default_axis_opset13",
+            opset_imports=[onnx.helper.make_opsetid("", 13)],
+        )
+
+    @staticmethod
+    def export_zero_size_splits_opset13() -> None:
+        # 1-dimensional tensor with dimension_size=0
+        node_input = np.array([]).astype(np.float32)
+
+        # Split empty tensor to tensors of size zero
+        split = np.array([0, 0, 0]).astype(np.int64)
+        node = onnx.helper.make_node(
+            "Split",
+            inputs=["input", "split"],
+            outputs=["output_1", "output_2", "output_3"],
+        )
+
+        expected_outputs = [
+            np.array([]).astype(np.float32),
+            np.array([]).astype(np.float32),
+            np.array([]).astype(np.float32),
+        ]
+        expect(
+            node,
+            inputs=[node_input, split],
+            outputs=expected_outputs,
+            name="test_split_zero_size_splits_opset13",
+            opset_imports=[onnx.helper.make_opsetid("", 13)],
+        )
+
+    @staticmethod
+    def export_1d_opset18() -> None:
+        node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)
+
+        node = onnx.helper.make_node(
+            "Split",
+            inputs=["input"],
+            outputs=["output_1", "output_2", "output_3"],
+            axis=0,
+            num_outputs=3,
+        )
+
+        expected_outputs = [
+            np.array([1.0, 2.0]).astype(np.float32),
+            np.array([3.0, 4.0]).astype(np.float32),
+            np.array([5.0, 6.0]).astype(np.float32),
+        ]
+        expect(
+            node,
+            inputs=[node_input],
+            outputs=expected_outputs,
+            name="test_split_equal_parts_1d_opset18",
+        )
+
+        split = np.array([2, 4]).astype(np.int64)
+        node = onnx.helper.make_node(
+            "Split",
+            inputs=["input", "split"],
+            outputs=["output_1", "output_2"],
+            axis=0,
+        )
+
+        expected_outputs = [
+            np.array([1.0, 2.0]).astype(np.float32),
+            np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),
+        ]
+        expect(
+            node,
+            inputs=[node_input, split],
+            outputs=expected_outputs,
+            name="test_split_variable_parts_1d_opset18",
+        )
+
+    @staticmethod
+    def export_2d_opset18() -> None:
+        node_input = np.array(
+            [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [7.0, 8.0, 9.0, 10.0, 11.0, 12.0]]
+        ).astype(np.float32)
+
+        node = onnx.helper.make_node(
+            "Split",
+            inputs=["input"],
+            outputs=["output_1", "output_2"],
+            axis=1,
+            num_outputs=2,
+        )
+
+        expected_outputs = [
+            np.array([[1.0, 2.0, 3.0], [7.0, 8.0, 9.0]]).astype(np.float32),
+            np.array([[4.0, 5.0, 6.0], [10.0, 11.0, 12.0]]).astype(np.float32),
+        ]
+
+        expect(
+            node,
+            inputs=[node_input],
+            outputs=expected_outputs,
+            name="test_split_equal_parts_2d",
+        )
+
+        split = np.array([2, 4]).astype(np.int64)
+        node = onnx.helper.make_node(
+            "Split",
+            inputs=["input", "split"],
+            outputs=["output_1", "output_2"],
+            axis=1,
+        )
+
+        expected_outputs = [
+            np.array([[1.0, 2.0], [7.0, 8.0]]).astype(np.float32),
+            np.array([[3.0, 4.0, 5.0, 6.0], [9.0, 10.0, 11.0, 12.0]]).astype(
+                np.float32
+            ),
+        ]
+
+        expect(
+            node,
+            inputs=[node_input, split],
+            outputs=expected_outputs,
+            name="test_split_variable_parts_2d_opset18",
+        )
+
+    @staticmethod
+    def export_default_values_opset18() -> None:
+        node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)
+
+        # If axis is not specified, split is applied on default axis 0
+        node = onnx.helper.make_node(
+            "Split",
+            inputs=["input"],
+            outputs=["output_1", "output_2", "output_3"],
+            num_outputs=3,
+        )
+
+        expected_outputs = [
+            np.array([1.0, 2.0]).astype(np.float32),
+            np.array([3.0, 4.0]).astype(np.float32),
+            np.array([5.0, 6.0]).astype(np.float32),
+        ]
+        expect(
+            node,
+            inputs=[node_input],
+            outputs=expected_outputs,
+            name="test_split_equal_parts_default_axis_opset18",
+        )
+
+        split = np.array([2, 4]).astype(np.int64)
+        node = onnx.helper.make_node(
+            "Split", inputs=["input", "split"], outputs=["output_1", "output_2"]
+        )
+
+        expected_outputs = [
+            np.array([1.0, 2.0]).astype(np.float32),
+            np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),
+        ]
+        expect(
+            node,
+            inputs=[node_input, split],
+            outputs=expected_outputs,
+            name="test_split_variable_parts_default_axis_opset18",
+        )
+
+    @staticmethod
+    def export_zero_size_splits_opset18() -> None:
+        # 1-dimensional tensor with dimension_size=0
+        node_input = np.array([]).astype(np.float32)
+
+        # Split empty tensor to tensors of size zero
+        split = np.array([0, 0, 0]).astype(np.int64)
+        node = onnx.helper.make_node(
+            "Split",
+            inputs=["input", "split"],
+            outputs=["output_1", "output_2", "output_3"],
+        )
+
+        expected_outputs = [
+            np.array([]).astype(np.float32),
+            np.array([]).astype(np.float32),
+            np.array([]).astype(np.float32),
+        ]
+        expect(
+            node,
+            inputs=[node_input, split],
+            outputs=expected_outputs,
+            name="test_split_zero_size_splits_opset18",
+        )
+
+    @staticmethod
+    def export_1d_uneven_split_opset18() -> None:
+        node_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]).astype(np.float32)
+
+        # If axis is not specified, split is applied on default axis 0
+        node = onnx.helper.make_node(
+            "Split",
+            inputs=["input"],
+            outputs=["output_1", "output_2", "output_3", "output_4"],
+            num_outputs=4,
+        )
+
+        expected_outputs = [
+            np.array([1.0, 2.0]).astype(np.float32),
+            np.array([3.0, 4.0]).astype(np.float32),
+            np.array([5.0, 6.0]).astype(np.float32),
+            np.array([7.0]).astype(np.float32),
+        ]
+        expect(
+            node,
+            inputs=[node_input],
+            outputs=expected_outputs,
+            name="test_split_1d_uneven_split_opset18",
+        )
+
+    @staticmethod
+    def export_2d_uneven_split_opset18() -> None:
+        node_input = np.array(
+            [
+                [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
+                [9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0],
+            ]
+        ).astype(np.float32)
+
+        node = onnx.helper.make_node(
+            "Split",
+            inputs=["input"],
+            outputs=["output_1", "output_2", "output_3"],
+            axis=1,
+            num_outputs=3,
+        )
+
+        expected_outputs = [
+            np.array([[1.0, 2.0, 3.0], [9.0, 10.0, 11.0]]).astype(np.float32),
+            np.array([[4.0, 5.0, 6.0], [12.0, 13.0, 14.0]]).astype(np.float32),
+            np.array([[7.0, 8.0], [15.0, 16.0]]).astype(np.float32),
+        ]
+
+        expect(
+            node,
+            inputs=[node_input],
+            outputs=expected_outputs,
+            name="test_split_2d_uneven_split_opset18",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/splittosequence.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/splittosequence.py
new file mode 100644
index 0000000000000000000000000000000000000000..f37fd222a2f6296bcbae3f1434b8c00082b7a641
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/splittosequence.py
@@ -0,0 +1,80 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class SplitToSequence(Base):
+    @staticmethod
+    def export_with_split_1() -> None:
+        data = np.arange(18).reshape((3, 6)).astype(np.float32)
+        split = np.array(2, dtype=np.int64)
+
+        node = onnx.helper.make_node(
+            "SplitToSequence", ["data", "split"], ["seq"], axis=1
+        )
+
+        expected_outputs = [
+            [
+                np.array([[0.0, 1.0], [6.0, 7.0], [12.0, 13.0]], dtype=np.float32),
+                np.array([[2.0, 3.0], [8.0, 9.0], [14.0, 15.0]], dtype=np.float32),
+                np.array([[4.0, 5.0], [10.0, 11.0], [16.0, 17.0]], dtype=np.float32),
+            ]
+        ]
+
+        expect(
+            node,
+            inputs=[data, split],
+            outputs=expected_outputs,
+            name="test_split_to_sequence_1",
+        )
+
+    @staticmethod
+    def export_with_split_2() -> None:
+        data = np.arange(18).reshape((3, 6)).astype(np.float32)
+        split = np.array([1, 2], dtype=np.int64)
+
+        node = onnx.helper.make_node(
+            "SplitToSequence", ["data", "split"], ["seq"], axis=0
+        )
+
+        expected_outputs = [
+            [
+                data[:1],
+                data[1:],
+            ]
+        ]
+
+        expect(
+            node,
+            inputs=[data, split],
+            outputs=expected_outputs,
+            name="test_split_to_sequence_2",
+        )
+
+    @staticmethod
+    def export_nokeepdims() -> None:
+        data = np.arange(18).reshape((3, 6)).astype(np.float32)
+
+        node = onnx.helper.make_node(
+            "SplitToSequence",
+            ["data"],
+            ["seq"],
+            axis=1,
+            keepdims=0,
+        )
+
+        expected_outputs = [[data[:, i] for i in range(data.shape[1])]]
+
+        expect(
+            node,
+            inputs=[data],
+            outputs=expected_outputs,
+            name="test_split_to_sequence_nokeepdims",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sqrt.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sqrt.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff82419fd8d9c047a7d3b991c5a7e0dc53071289
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/sqrt.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Sqrt(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "Sqrt",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.array([1, 4, 9]).astype(np.float32)
+        y = np.sqrt(x)  # expected output [1., 2., 3.]
+        expect(node, inputs=[x], outputs=[y], name="test_sqrt_example")
+
+        x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
+        y = np.sqrt(x)
+        expect(node, inputs=[x], outputs=[y], name="test_sqrt")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/squeeze.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/squeeze.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f67e6647126bbd16c8ef5c3dc2ff7f6767bb63a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/squeeze.py
@@ -0,0 +1,37 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Squeeze(Base):
+    @staticmethod
+    def export_squeeze() -> None:
+        node = onnx.helper.make_node(
+            "Squeeze",
+            inputs=["x", "axes"],
+            outputs=["y"],
+        )
+        x = np.random.randn(1, 3, 4, 5).astype(np.float32)
+        axes = np.array([0], dtype=np.int64)
+        y = np.squeeze(x, axis=0)
+
+        expect(node, inputs=[x, axes], outputs=[y], name="test_squeeze")
+
+    @staticmethod
+    def export_squeeze_negative_axes() -> None:
+        node = onnx.helper.make_node(
+            "Squeeze",
+            inputs=["x", "axes"],
+            outputs=["y"],
+        )
+        x = np.random.randn(1, 3, 1, 5).astype(np.float32)
+        axes = np.array([-2], dtype=np.int64)
+        y = np.squeeze(x, axis=-2)
+        expect(node, inputs=[x, axes], outputs=[y], name="test_squeeze_negative_axes")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/stft.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/stft.py
new file mode 100644
index 0000000000000000000000000000000000000000..71ed61b3c6696a54cf422b7d8f55c0e506fff01b
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/stft.py
@@ -0,0 +1,70 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class STFT(Base):
+    @staticmethod
+    def export() -> None:
+        signal = np.arange(0, 128, dtype=np.float32).reshape(1, 128, 1)
+        length = np.array(16).astype(np.int64)
+        onesided_length = (length >> 1) + 1
+        step = np.array(8).astype(np.int64)
+
+        no_window = ""  # optional input, not supplied
+        node = onnx.helper.make_node(
+            "STFT",
+            inputs=["signal", "frame_step", no_window, "frame_length"],
+            outputs=["output"],
+        )
+
+        nstfts = ((signal.shape[1] - length) // step) + 1
+        # [batch_size][frames][frame_length][2]
+        output = np.empty([1, nstfts, onesided_length, 2], dtype=np.float32)
+        for i in range(nstfts):
+            start = i * step
+            stop = i * step + length
+            complex_out = np.fft.fft(signal[0, start:stop, 0])[0:onesided_length]
+            output[0, i] = np.stack((complex_out.real, complex_out.imag), axis=1)
+
+        output = output.astype(signal.dtype)
+        expect(node, inputs=[signal, step, length], outputs=[output], name="test_stft")
+
+        node = onnx.helper.make_node(
+            "STFT",
+            inputs=["signal", "frame_step", "window"],
+            outputs=["output"],
+        )
+
+        # Test with window
+        a0 = 0.5
+        a1 = 0.5
+        window = a0 + a1 * np.cos(
+            2 * np.pi * np.arange(0, length, 1, dtype=np.float32) / length
+        )
+        nstfts = 1 + (signal.shape[1] - window.shape[0]) // step
+
+        # [batch_size][frames][frame_length][2]
+        output = np.empty([1, nstfts, onesided_length, 2], dtype=np.float32)
+        for i in range(nstfts):
+            start = i * step
+            stop = i * step + length
+            complex_out = np.fft.fft(signal[0, start:stop, 0] * window)[
+                0:onesided_length
+            ]
+            output[0, i] = np.stack((complex_out.real, complex_out.imag), axis=1)
+        window = window.astype(signal.dtype)
+        output = output.astype(signal.dtype)
+        expect(
+            node,
+            inputs=[signal, step, window],
+            outputs=[output],
+            name="test_stft_with_window",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/string_concat.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/string_concat.py
new file mode 100644
index 0000000000000000000000000000000000000000..85b5cfb17fdac3912eb227df037b8437b8ec612e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/string_concat.py
@@ -0,0 +1,69 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class StringConcat(Base):
+    @staticmethod
+    def export() -> None:
+        node = onnx.helper.make_node(
+            "StringConcat",
+            inputs=["x", "y"],
+            outputs=["result"],
+        )
+        x = np.array(["abc", "def"]).astype("object")
+        y = np.array([".com", ".net"]).astype("object")
+        result = np.array(["abc.com", "def.net"]).astype("object")
+
+        expect(node, inputs=[x, y], outputs=[result], name="test_string_concat")
+
+        x = np.array(["cat", "dog", "snake"]).astype("object")
+        y = np.array(["s"]).astype("object")
+        result = np.array(["cats", "dogs", "snakes"]).astype("object")
+
+        expect(
+            node,
+            inputs=[x, y],
+            outputs=[result],
+            name="test_string_concat_broadcasting",
+        )
+
+        x = np.array("cat").astype("object")
+        y = np.array("s").astype("object")
+        result = np.array("cats").astype("object")
+
+        expect(
+            node,
+            inputs=[x, y],
+            outputs=[result],
+            name="test_string_concat_zero_dimensional",
+        )
+
+        x = np.array(["abc", ""]).astype("object")
+        y = np.array(["", "abc"]).astype("object")
+        result = np.array(["abc", "abc"]).astype("object")
+
+        expect(
+            node,
+            inputs=[x, y],
+            outputs=[result],
+            name="test_string_concat_empty_string",
+        )
+
+        x = np.array(["的", "中"]).astype("object")
+        y = np.array(["的", "中"]).astype("object")
+        result = np.array(["的的", "中中"]).astype("object")
+
+        expect(
+            node,
+            inputs=[x, y],
+            outputs=[result],
+            name="test_string_concat_utf8",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/string_split.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/string_split.py
new file mode 100644
index 0000000000000000000000000000000000000000..305bd578c8fe15bf5b50f5967998adaea5a6f6a5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/string_split.py
@@ -0,0 +1,151 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class StringSplit(Base):
    """Test-case generators for the ONNX ``StringSplit`` operator.

    Each ``export_*`` method builds a StringSplit node together with
    hand-computed expected outputs and registers them via ``expect``.
    The node returns two outputs: the padded substrings tensor and the
    per-element substring count.
    """

    @staticmethod
    def export_basic() -> None:
        """Split each element on ``.`` with no limit on the number of splits."""
        node = onnx.helper.make_node(
            "StringSplit",
            inputs=["x"],
            outputs=["substrings", "length"],
            delimiter=".",
            maxsplit=None,
        )

        x = np.array(["abc.com", "def.net"]).astype(object)

        substrings = np.array([["abc", "com"], ["def", "net"]]).astype(object)

        # Number of substrings produced for each input element.
        length = np.array([2, 2], dtype=np.int64)

        expect(
            node,
            inputs=[x],
            outputs=[substrings, length],
            name="test_string_split_basic",
        )

    @staticmethod
    def export_maxsplit() -> None:
        """At most two splits per element, default delimiter.

        Rows that produce fewer substrings than the widest row are
        right-padded with empty strings.
        """
        node = onnx.helper.make_node(
            "StringSplit",
            inputs=["x"],
            outputs=["substrings", "length"],
            maxsplit=2,
        )

        x = np.array(
            [["hello world", "def.net"], ["o n n x", "the quick brown fox"]]
        ).astype(object)

        substrings = np.array(
            [
                [["hello", "world", ""], ["def.net", "", ""]],
                [["o", "n", "n x"], ["the", "quick", "brown fox"]],
            ]
        ).astype(object)

        length = np.array([[2, 1], [3, 3]], np.int64)

        expect(
            node,
            inputs=[x],
            outputs=[substrings, length],
            name="test_string_split_maxsplit",
        )

    @staticmethod
    def export_consecutive_delimiters() -> None:
        """Adjacent explicit delimiters yield empty-string substrings."""
        node = onnx.helper.make_node(
            "StringSplit",
            inputs=["x"],
            outputs=["substrings", "length"],
            delimiter="-",
            maxsplit=None,
        )

        x = np.array(["o-n-n--x-", "o-n----nx"]).astype(object)

        substrings = np.array(
            [["o", "n", "n", "", "x", ""], ["o", "n", "", "", "", "nx"]]
        ).astype(object)

        length = np.array([6, 6], dtype=np.int64)

        expect(
            node,
            inputs=[x],
            outputs=[substrings, length],
            name="test_string_split_consecutive_delimiters",
        )

    @staticmethod
    def export_empty_string_delimiter() -> None:
        """Empty-string and absent delimiter behave alike here: the inputs
        split on runs of whitespace with no empty substrings produced."""
        for delimiter, test_name in (
            ("", "test_string_split_empty_string_delimiter"),
            (None, "test_string_split_no_delimiter"),
        ):
            node = onnx.helper.make_node(
                "StringSplit",
                inputs=["x"],
                outputs=["substrings", "length"],
                delimiter=delimiter,
                maxsplit=None,
            )

            x = np.array(
                ["hello world !", "  hello   world !", " hello world   ! "]
            ).astype(object)

            substrings = np.array(
                [
                    ["hello", "world", "!"],
                    ["hello", "world", "!"],
                    ["hello", "world", "!"],
                ]
            ).astype(object)

            length = np.array([3, 3, 3], dtype=np.int64)

            expect(
                node,
                inputs=[x],
                outputs=[substrings, length],
                name=test_name,
            )

    @staticmethod
    def export_empty_string_split() -> None:
        """Empty input tensor: output is an empty ``(0, 0)`` string tensor.

        The second output dimension is not fixed by an empty input, hence
        the explicit ``output_type_protos`` with a dynamic dimension.
        """
        node = onnx.helper.make_node(
            "StringSplit",
            inputs=["x"],
            outputs=["substrings", "length"],
            delimiter=None,
            maxsplit=None,
        )

        x = np.array([]).astype(object)

        substrings = np.array([]).astype(object).reshape(0, 0)

        length = np.array([], dtype=np.int64)

        expect(
            node,
            inputs=[x],
            outputs=[substrings, length],
            name="test_string_split_empty_tensor",
            output_type_protos=[
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.STRING, (0, None)),
                None,
            ],
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/stringnormalizer.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/stringnormalizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..d945df61333e5ce82baf27894b97bdf829b76bec
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/stringnormalizer.py
@@ -0,0 +1,148 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class StringNormalizer(Base):
    """Test-case generators for the ONNX ``StringNormalizer`` operator.

    Fix over the original: the local variable ``input`` shadowed the
    ``input`` builtin in every method; it is renamed to ``x`` (and the
    matching ``output`` to ``y``).  Registered test names are unchanged.
    """

    @staticmethod
    def export_nostopwords_nochangecase() -> None:
        """No stopwords and no case change: the node is a no-op."""
        x = np.array(["monday", "tuesday"]).astype(object)
        y = x

        # No stopwords. This is a NOOP
        node = onnx.helper.make_node(
            "StringNormalizer",
            inputs=["x"],
            outputs=["y"],
            is_case_sensitive=1,
        )
        expect(
            node,
            inputs=[x],
            outputs=[y],
            name="test_strnormalizer_nostopwords_nochangecase",
        )

    @staticmethod
    def export_monday_casesensintive_nochangecase() -> None:
        """Case-sensitive stopword removal, case left untouched."""
        x = np.array(["monday", "tuesday", "wednesday", "thursday"]).astype(object)
        y = np.array(["tuesday", "wednesday", "thursday"]).astype(object)
        stopwords = ["monday"]

        node = onnx.helper.make_node(
            "StringNormalizer",
            inputs=["x"],
            outputs=["y"],
            is_case_sensitive=1,
            stopwords=stopwords,
        )
        expect(
            node,
            inputs=[x],
            outputs=[y],
            name="test_strnormalizer_export_monday_casesensintive_nochangecase",
        )

    @staticmethod
    def export_monday_casesensintive_lower() -> None:
        """Case-sensitive stopword removal, then LOWER case change.

        The remaining inputs are already lower case, so the expected
        output equals the filtered input.
        """
        x = np.array(["monday", "tuesday", "wednesday", "thursday"]).astype(object)
        y = np.array(["tuesday", "wednesday", "thursday"]).astype(object)
        stopwords = ["monday"]

        node = onnx.helper.make_node(
            "StringNormalizer",
            inputs=["x"],
            outputs=["y"],
            case_change_action="LOWER",
            is_case_sensitive=1,
            stopwords=stopwords,
        )
        expect(
            node,
            inputs=[x],
            outputs=[y],
            name="test_strnormalizer_export_monday_casesensintive_lower",
        )

    @staticmethod
    def export_monday_casesensintive_upper() -> None:
        """Case-sensitive stopword removal, then UPPER case change."""
        x = np.array(["monday", "tuesday", "wednesday", "thursday"]).astype(object)
        y = np.array(["TUESDAY", "WEDNESDAY", "THURSDAY"]).astype(object)
        stopwords = ["monday"]

        node = onnx.helper.make_node(
            "StringNormalizer",
            inputs=["x"],
            outputs=["y"],
            case_change_action="UPPER",
            is_case_sensitive=1,
            stopwords=stopwords,
        )
        expect(
            node,
            inputs=[x],
            outputs=[y],
            name="test_strnormalizer_export_monday_casesensintive_upper",
        )

    @staticmethod
    def export_monday_empty_output() -> None:
        """All elements are stopwords; expected output is a single empty string."""
        x = np.array(["monday", "monday"]).astype(object)
        y = np.array([""]).astype(object)
        stopwords = ["monday"]

        node = onnx.helper.make_node(
            "StringNormalizer",
            inputs=["x"],
            outputs=["y"],
            case_change_action="UPPER",
            is_case_sensitive=1,
            stopwords=stopwords,
        )
        expect(
            node,
            inputs=[x],
            outputs=[y],
            name="test_strnormalizer_export_monday_empty_output",
        )

    @staticmethod
    def export_monday_insensintive_upper_twodim() -> None:
        """Case-insensitive stopword removal on a [1, 6] input, then UPPER."""
        x = (
            np.array(
                ["Monday", "tuesday", "wednesday", "Monday", "tuesday", "wednesday"]
            )
            .astype(object)
            .reshape([1, 6])
        )

        # It does upper case cedilla, accented E
        # and german umlaut but fails
        # with german eszett
        y = (
            np.array(["TUESDAY", "WEDNESDAY", "TUESDAY", "WEDNESDAY"])
            .astype(object)
            .reshape([1, 4])
        )
        stopwords = ["monday"]

        node = onnx.helper.make_node(
            "StringNormalizer",
            inputs=["x"],
            outputs=["y"],
            case_change_action="UPPER",
            stopwords=stopwords,
        )
        expect(
            node,
            inputs=[x],
            outputs=[y],
            name="test_strnormalizer_export_monday_insensintive_upper_twodim",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/swish.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/swish.py
new file mode 100644
index 0000000000000000000000000000000000000000..d08cd6338cc7ec31180d69931baa405335f88a47
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/swish.py
@@ -0,0 +1,36 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
def swish(x: np.ndarray, alpha: float) -> np.ndarray:
    """Reference Swish activation: ``x * sigmoid(alpha * x)``."""
    sigmoid = 1 / (1 + np.exp(-alpha * x))
    return x * sigmoid
+
+
class Swish(Base):
    """Test-case generator for the ONNX ``Swish`` operator."""

    @staticmethod
    def export() -> None:
        """Register a single Swish case with ``alpha`` supplied as an attribute."""
        alpha = 1.0
        node = onnx.helper.make_node(
            "Swish",
            inputs=["x"],
            outputs=["y"],
            alpha=alpha,  # pass alpha as attribute
        )

        data = np.array([3, 4, 5], dtype=np.float32)
        expect(
            node,
            inputs=[data],
            outputs=[swish(data, alpha=alpha)],
            name="test_swish",
            opset_imports=[onnx.helper.make_opsetid("", 24)],
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/tan.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/tan.py
new file mode 100644
index 0000000000000000000000000000000000000000..393cb82b3c3c8d7762ce4b288aee51e9f02621ef
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/tan.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Tan(Base):
    """Test-case generators for the ONNX ``Tan`` operator."""

    @staticmethod
    def export() -> None:
        """Register Tan cases: a tiny example and a random 3-D tensor."""
        node = onnx.helper.make_node("Tan", inputs=["x"], outputs=["y"])

        sample = np.array([-1, 0, 1]).astype(np.float32)
        expect(node, inputs=[sample], outputs=[np.tan(sample)], name="test_tan_example")

        random_input = np.random.randn(3, 4, 5).astype(np.float32)
        expect(node, inputs=[random_input], outputs=[np.tan(random_input)], name="test_tan")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/tanh.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/tanh.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7926f35dc897132d9de28f8fe6e533923c6b09f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/tanh.py
@@ -0,0 +1,28 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Tanh(Base):
    """Test-case generators for the ONNX ``Tanh`` operator."""

    @staticmethod
    def export() -> None:
        """Register Tanh cases: a tiny example and a random 3-D tensor."""
        node = onnx.helper.make_node("Tanh", inputs=["x"], outputs=["y"])

        sample = np.array([-1, 0, 1]).astype(np.float32)
        # tanh(sample) == [-0.76159418, 0., 0.76159418]
        expect(node, inputs=[sample], outputs=[np.tanh(sample)], name="test_tanh_example")

        random_input = np.random.randn(3, 4, 5).astype(np.float32)
        expect(node, inputs=[random_input], outputs=[np.tanh(random_input)], name="test_tanh")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/tensorscatter.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/tensorscatter.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e32c25bed592c7f2fe4632176f07e61a00608ae
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/tensorscatter.py
@@ -0,0 +1,174 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class TensorScatter(Base):
    """Test-case generators for the ONNX ``TensorScatter`` operator.

    The operator writes ``update`` rows into ``past_cache`` at the
    positions given by ``write_indices`` (one index per leading-batch
    entry), producing ``present_cache``.  All expected tensors below are
    spelled out by hand.
    """

    @staticmethod
    def export_tensorscatter() -> None:
        """Single-row update per batch entry with mode="linear"."""
        node = onnx.helper.make_node(
            "TensorScatter",
            inputs=["past_cache", "update", "write_indices"],
            outputs=["present_cache"],
            mode="linear",
        )
        past_cache = np.array(
            [
                [[[1, 2, 3, 4, 5], [5, 6, 7, 8, 9], [8, 7, 6, 5, 4], [4, 3, 2, 1, 0]]],
                [[[1, 2, 3, 4, 5], [5, 6, 7, 8, 9], [8, 7, 6, 5, 4], [4, 3, 2, 1, 0]]],
            ],
            dtype=np.float32,
        )
        update = np.array(
            [
                [[[5, 5, 5, 5, 5]]],
                [[[1, 1, 1, 1, 1]]],
            ],
            dtype=np.float32,
        )
        # Batch 0 writes at position 1, batch 1 at position 2.
        write_indices = np.array([1, 2], dtype=np.int64)
        present_cache = np.array(
            [
                [[[1, 2, 3, 4, 5], [5, 5, 5, 5, 5], [8, 7, 6, 5, 4], [4, 3, 2, 1, 0]]],
                [[[1, 2, 3, 4, 5], [5, 6, 7, 8, 9], [1, 1, 1, 1, 1], [4, 3, 2, 1, 0]]],
            ],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[past_cache, update, write_indices],
            outputs=[present_cache],
            name="test_tensorscatter",
        )

    @staticmethod
    def export_tensorscatter_circular() -> None:
        """Two-row update with mode="circular".

        Batch 1 starts writing at index 3 and, per the expected output,
        wraps around to index 0 for its second row.
        """
        node = onnx.helper.make_node(
            "TensorScatter",
            inputs=["past_cache", "update", "write_indices"],
            outputs=["present_cache"],
            mode="circular",
        )
        past_cache = np.array(
            [
                [[[1, 2, 3, 4, 5], [5, 6, 7, 8, 9], [8, 7, 6, 5, 4], [4, 3, 2, 1, 0]]],
                [[[1, 2, 3, 4, 5], [5, 6, 7, 8, 9], [8, 7, 6, 5, 4], [4, 3, 2, 1, 0]]],
            ],
            dtype=np.float32,
        )
        update = np.array(
            [
                [
                    [
                        [5, 5, 5, 5, 5],
                        [6, 6, 6, 6, 6],
                    ]
                ],
                [
                    [
                        [1, 1, 1, 1, 1],
                        [2, 2, 2, 2, 2],
                    ]
                ],
            ],
            dtype=np.float32,
        )
        write_indices = np.array([1, 3], dtype=np.int64)
        present_cache = np.array(
            [
                [[[1, 2, 3, 4, 5], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6], [4, 3, 2, 1, 0]]],
                [[[2, 2, 2, 2, 2], [5, 6, 7, 8, 9], [8, 7, 6, 5, 4], [1, 1, 1, 1, 1]]],
            ],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[past_cache, update, write_indices],
            outputs=[present_cache],
            name="test_tensorscatter_circular",
        )

    @staticmethod
    def export_tensorscatter_3d() -> None:
        """3-D tensors with the default mode (no ``mode`` attribute set)."""
        node = onnx.helper.make_node(
            "TensorScatter",
            inputs=["past_cache", "update", "write_indices"],
            outputs=["present_cache"],
        )
        past_cache = np.array(
            [
                [
                    [1, 2, 3, 4, 5],
                    [5, 6, 7, 8, 9],
                    [8, 7, 6, 5, 4],
                    [5, 4, 3, 2, 1],
                ],
                [
                    [1, 2, 3, 4, 5],
                    [5, 6, 7, 8, 9],
                    [8, 7, 6, 5, 4],
                    [5, 4, 3, 2, 1],
                ],
                [
                    [1, 2, 3, 4, 5],
                    [5, 6, 7, 8, 9],
                    [8, 7, 6, 5, 4],
                    [5, 4, 3, 2, 1],
                ],
            ],
            dtype=np.float32,
        )
        update = np.array(
            [
                [
                    [4, 4, 4, 4, 4],
                    [5, 5, 5, 5, 5],
                ],
                [
                    [6, 6, 6, 6, 6],
                    [7, 7, 7, 7, 7],
                ],
                [
                    [2, 2, 2, 2, 2],
                    [3, 3, 3, 3, 3],
                ],
            ],
            dtype=np.float32,
        )
        # Per-batch start positions for the two-row updates.
        write_indices = np.array([1, 2, 0], dtype=np.int64)
        present_cache = np.array(
            [
                [
                    [1, 2, 3, 4, 5],
                    [4, 4, 4, 4, 4],
                    [5, 5, 5, 5, 5],
                    [5, 4, 3, 2, 1],
                ],
                [
                    [1, 2, 3, 4, 5],
                    [5, 6, 7, 8, 9],
                    [6, 6, 6, 6, 6],
                    [7, 7, 7, 7, 7],
                ],
                [
                    [2, 2, 2, 2, 2],
                    [3, 3, 3, 3, 3],
                    [8, 7, 6, 5, 4],
                    [5, 4, 3, 2, 1],
                ],
            ],
            dtype=np.float32,
        )
        expect(
            node,
            inputs=[past_cache, update, write_indices],
            outputs=[present_cache],
            name="test_tensorscatter_3d",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/tfidfvectorizer.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/tfidfvectorizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f0c67025ff578ee48a27c8f7620e40d04ca9da9
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/tfidfvectorizer.py
@@ -0,0 +1,264 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from typing import Any
+
+import numpy as np
+
+import onnx
+from onnx import NodeProto
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class TfIdfVectorizerHelper:
    """Collect the attributes required by a ``TfIdfVectorizer`` node and
    build the node on demand.

    Fixes over the original: required-attribute validation used ``assert``
    (silently stripped under ``python -O``) and spelled each attribute
    name in a throwaway local; validation now raises ``ValueError`` and
    the names live in one class-level tuple.
    """

    # Attributes that every TfIdfVectorizer node must carry.
    _REQUIRED_ATTRS = (
        "mode",
        "min_gram_length",
        "max_gram_length",
        "max_skip_count",
        "ngram_counts",
        "ngram_indexes",
        "pool_int64s",
    )

    def __init__(self, **params: Any) -> None:
        """Store the node attributes given as keyword arguments.

        Raises:
            ValueError: if any required attribute is missing.
        """
        missing = [name for name in self._REQUIRED_ATTRS if name not in params]
        if missing:
            # Raise (not assert) so the check survives optimized runs.
            raise ValueError(f"Missing attribute(s): {', '.join(missing)}")
        for name in self._REQUIRED_ATTRS:
            setattr(self, name, params[name])

    def make_node_noweights(self) -> NodeProto:
        """Build the TfIdfVectorizer node (optional weights input omitted)."""
        return onnx.helper.make_node(
            "TfIdfVectorizer",
            inputs=["X"],
            outputs=["Y"],
            mode=self.mode,
            min_gram_length=self.min_gram_length,
            max_gram_length=self.max_gram_length,
            max_skip_count=self.max_skip_count,
            ngram_counts=self.ngram_counts,
            ngram_indexes=self.ngram_indexes,
            pool_int64s=self.pool_int64s,
        )
+
+
class TfIdfVectorizer(Base):
    """Test-case generators for the ONNX ``TfIdfVectorizer`` operator.

    All cases use mode="TF" (raw term frequencies) over the same n-gram
    pool: four unigrams followed by three bigrams, with ``ngram_counts``
    marking where each n-gram level starts in ``pool_int64s``.
    """

    @staticmethod
    def export_tf_only_bigrams_skip0() -> None:
        """Bigrams only (min=max=2), no skips, 1-D input."""
        input = np.array([1, 1, 3, 3, 3, 7, 8, 6, 7, 5, 6, 8]).astype(np.int32)
        output = np.array([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0]).astype(np.float32)

        ngram_counts = np.array([0, 4]).astype(np.int64)
        ngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)
        pool_int64s = np.array([2, 3, 5, 4, 5, 6, 7, 8, 6, 7]).astype(  # unigrams
            np.int64
        )  # bigrams

        helper = TfIdfVectorizerHelper(
            mode="TF",
            min_gram_length=2,
            max_gram_length=2,
            max_skip_count=0,
            ngram_counts=ngram_counts,
            ngram_indexes=ngram_indexes,
            pool_int64s=pool_int64s,
        )
        node = helper.make_node_noweights()
        expect(
            node,
            inputs=[input],
            outputs=[output],
            name="test_tfidfvectorizer_tf_only_bigrams_skip0",
        )

    @staticmethod
    def export_tf_batch_onlybigrams_skip0() -> None:
        """Bigrams only, no skips, batched (2-D) input."""
        input = np.array([[1, 1, 3, 3, 3, 7], [8, 6, 7, 5, 6, 8]]).astype(np.int32)
        output = np.array(
            [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0]]
        ).astype(np.float32)

        ngram_counts = np.array([0, 4]).astype(np.int64)
        ngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)
        pool_int64s = np.array([2, 3, 5, 4, 5, 6, 7, 8, 6, 7]).astype(  # unigrams
            np.int64
        )  # bigrams

        helper = TfIdfVectorizerHelper(
            mode="TF",
            min_gram_length=2,
            max_gram_length=2,
            max_skip_count=0,
            ngram_counts=ngram_counts,
            ngram_indexes=ngram_indexes,
            pool_int64s=pool_int64s,
        )
        node = helper.make_node_noweights()
        expect(
            node,
            inputs=[input],
            outputs=[output],
            name="test_tfidfvectorizer_tf_batch_onlybigrams_skip0",
        )

    @staticmethod
    def export_tf_onlybigrams_levelempty() -> None:
        """Pool has an empty unigram level (ngram_counts = [0, 0])."""
        input = np.array([1, 1, 3, 3, 3, 7, 8, 6, 7, 5, 6, 8]).astype(np.int32)
        output = np.array([1.0, 1.0, 1.0]).astype(np.float32)

        ngram_counts = np.array([0, 0]).astype(np.int64)
        ngram_indexes = np.array([0, 1, 2]).astype(np.int64)
        pool_int64s = np.array([5, 6, 7, 8, 6, 7]).astype(  # unigrams none
            np.int64
        )  # bigrams

        helper = TfIdfVectorizerHelper(
            mode="TF",
            min_gram_length=2,
            max_gram_length=2,
            max_skip_count=0,
            ngram_counts=ngram_counts,
            ngram_indexes=ngram_indexes,
            pool_int64s=pool_int64s,
        )
        node = helper.make_node_noweights()
        expect(
            node,
            inputs=[input],
            outputs=[output],
            name="test_tfidfvectorizer_tf_onlybigrams_levelempty",
        )

    @staticmethod
    def export_tf_onlybigrams_skip5() -> None:
        """Bigrams only with up to five skipped tokens between gram items."""
        input = np.array([1, 1, 3, 3, 3, 7, 8, 6, 7, 5, 6, 8]).astype(np.int32)
        output = np.array([0.0, 0.0, 0.0, 0.0, 1.0, 3.0, 1.0]).astype(np.float32)

        ngram_counts = np.array([0, 4]).astype(np.int64)
        ngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)
        pool_int64s = np.array([2, 3, 5, 4, 5, 6, 7, 8, 6, 7]).astype(  # unigrams
            np.int64
        )  # bigrams

        helper = TfIdfVectorizerHelper(
            mode="TF",
            min_gram_length=2,
            max_gram_length=2,
            max_skip_count=5,
            ngram_counts=ngram_counts,
            ngram_indexes=ngram_indexes,
            pool_int64s=pool_int64s,
        )
        node = helper.make_node_noweights()
        expect(
            node,
            inputs=[input],
            outputs=[output],
            name="test_tfidfvectorizer_tf_onlybigrams_skip5",
        )

    @staticmethod
    def export_tf_batch_onlybigrams_skip5() -> None:
        """Bigrams only, max_skip_count=5, batched input."""
        input = np.array([[1, 1, 3, 3, 3, 7], [8, 6, 7, 5, 6, 8]]).astype(np.int32)
        output = np.array(
            [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0]]
        ).astype(np.float32)

        ngram_counts = np.array([0, 4]).astype(np.int64)
        ngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)
        pool_int64s = np.array([2, 3, 5, 4, 5, 6, 7, 8, 6, 7]).astype(  # unigrams
            np.int64
        )  # bigrams

        helper = TfIdfVectorizerHelper(
            mode="TF",
            min_gram_length=2,
            max_gram_length=2,
            max_skip_count=5,
            ngram_counts=ngram_counts,
            ngram_indexes=ngram_indexes,
            pool_int64s=pool_int64s,
        )
        node = helper.make_node_noweights()
        expect(
            node,
            inputs=[input],
            outputs=[output],
            name="test_tfidfvectorizer_tf_batch_onlybigrams_skip5",
        )

    @staticmethod
    def export_tf_uniandbigrams_skip5() -> None:
        """Unigrams and bigrams together (min=1, max=2), max_skip_count=5."""
        input = np.array([1, 1, 3, 3, 3, 7, 8, 6, 7, 5, 6, 8]).astype(np.int32)
        output = np.array([0.0, 3.0, 1.0, 0.0, 1.0, 3.0, 1.0]).astype(np.float32)

        ngram_counts = np.array([0, 4]).astype(np.int64)
        ngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)
        pool_int64s = np.array([2, 3, 5, 4, 5, 6, 7, 8, 6, 7]).astype(  # unigrams
            np.int64
        )  # bigrams

        helper = TfIdfVectorizerHelper(
            mode="TF",
            min_gram_length=1,
            max_gram_length=2,
            max_skip_count=5,
            ngram_counts=ngram_counts,
            ngram_indexes=ngram_indexes,
            pool_int64s=pool_int64s,
        )
        node = helper.make_node_noweights()
        expect(
            node,
            inputs=[input],
            outputs=[output],
            name="test_tfidfvectorizer_tf_uniandbigrams_skip5",
        )

    @staticmethod
    def export_tf_batch_uniandbigrams_skip5() -> None:
        """Unigrams and bigrams, max_skip_count=5, batched input."""
        input = np.array([[1, 1, 3, 3, 3, 7], [8, 6, 7, 5, 6, 8]]).astype(np.int32)
        output = np.array(
            [[0.0, 3.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0]]
        ).astype(np.float32)

        ngram_counts = np.array([0, 4]).astype(np.int64)
        ngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)
        pool_int64s = np.array([2, 3, 5, 4, 5, 6, 7, 8, 6, 7]).astype(  # unigrams
            np.int64
        )  # bigrams

        helper = TfIdfVectorizerHelper(
            mode="TF",
            min_gram_length=1,
            max_gram_length=2,
            max_skip_count=5,
            ngram_counts=ngram_counts,
            ngram_indexes=ngram_indexes,
            pool_int64s=pool_int64s,
        )
        node = helper.make_node_noweights()
        expect(
            node,
            inputs=[input],
            outputs=[output],
            name="test_tfidfvectorizer_tf_batch_uniandbigrams_skip5",
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/thresholdedrelu.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/thresholdedrelu.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f3fc6c2055aa2f57c8689a762d92139955df674
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/thresholdedrelu.py
@@ -0,0 +1,41 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class ThresholdedRelu(Base):
    """Test-case generators for the ONNX ``ThresholdedRelu`` operator."""

    @staticmethod
    def export() -> None:
        """Register cases with an explicit ``alpha`` attribute of 2.0."""
        alpha = 2.0
        node = onnx.helper.make_node(
            "ThresholdedRelu", inputs=["x"], outputs=["y"], alpha=alpha
        )

        data = np.array([-1.5, 0.0, 1.2, 2.0, 2.2]).astype(np.float32)
        # Clip everything below alpha up to alpha, then zero the clipped
        # values: elements <= alpha become 0.  Expected: [0., 0., 0., 0., 2.2]
        expected = np.clip(data, alpha, np.inf)
        expected[expected == alpha] = 0
        expect(node, inputs=[data], outputs=[expected], name="test_thresholdedrelu_example")

        data = np.random.randn(3, 4, 5).astype(np.float32)
        expected = np.clip(data, alpha, np.inf)
        expected[expected == alpha] = 0
        expect(node, inputs=[data], outputs=[expected], name="test_thresholdedrelu")

    @staticmethod
    def export_default() -> None:
        """Register a case relying on the operator's default alpha of 1.0."""
        default_alpha = 1.0
        node = onnx.helper.make_node("ThresholdedRelu", inputs=["x"], outputs=["y"])
        data = np.random.randn(3, 4, 5).astype(np.float32)
        expected = np.clip(data, default_alpha, np.inf)
        expected[expected == default_alpha] = 0
        expect(node, inputs=[data], outputs=[expected], name="test_thresholdedrelu_default")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/tile.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/tile.py
new file mode 100644
index 0000000000000000000000000000000000000000..d55555c079d498f3ca9a30e938cecc000aab11e4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/tile.py
@@ -0,0 +1,38 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Tile(Base):
    """Test-case generators for the ONNX ``Tile`` operator."""

    @staticmethod
    def export_tile() -> None:
        """Random 4-D input tiled by a random per-axis repeat count."""
        node = onnx.helper.make_node("Tile", inputs=["x", "y"], outputs=["z"])

        data = np.random.rand(2, 3, 4, 5).astype(np.float32)
        reps = np.random.randint(low=1, high=10, size=(np.ndim(data),)).astype(np.int64)
        expect(node, inputs=[data, reps], outputs=[np.tile(data, reps)], name="test_tile")

    @staticmethod
    def export_tile_precomputed() -> None:
        """2x2 input tiled twice along both axes, expected output spelled out."""
        node = onnx.helper.make_node("Tile", inputs=["x", "y"], outputs=["z"])

        data = np.array([[0, 1], [2, 3]], dtype=np.float32)
        reps = np.array([2, 2], dtype=np.int64)
        expected = np.array(
            [[0, 1, 0, 1], [2, 3, 2, 3], [0, 1, 0, 1], [2, 3, 2, 3]], dtype=np.float32
        )
        expect(node, inputs=[data, reps], outputs=[expected], name="test_tile_precomputed")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/topk.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/topk.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a14a5a173e27a0386aa050acadc2b3d37719f72
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/topk.py
@@ -0,0 +1,262 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def topk_sorted_implementation(X, k, axis, largest):
+    """Reference top-k: the k largest (or smallest) values of X along *axis*.
+
+    Returns ``(values, indices)`` with ``indices`` as int64.  Ties are broken
+    in favor of the smaller original index, matching the ONNX TopK spec.
+    """
+    # Positional index of each element along the sort axis; used below as the
+    # secondary lexsort key so equal values keep ascending original order.
+    ind_axis = np.indices(X.shape)[axis]
+    if largest:
+        # Negate the tie-break key so that, after the flip below, equal values
+        # still come out in ascending original-index order.
+        ind_axis = -ind_axis
+    # np.lexsort: the LAST key (X) is primary, ind_axis breaks ties.
+    sorted_indices = np.lexsort((ind_axis, X), axis=axis)
+    sorted_values = np.sort(X, axis=axis)
+    if largest:
+        # Ascending sort -> reverse along the axis to get descending order.
+        sorted_indices = np.flip(sorted_indices, axis=axis)
+        sorted_values = np.flip(sorted_values, axis=axis)
+    # Keep only the first k entries along the axis.
+    topk_sorted_indices = np.take(sorted_indices, np.arange(k), axis=axis)
+    topk_sorted_values = np.take(sorted_values, np.arange(k), axis=axis)
+    return topk_sorted_values, np.array(topk_sorted_indices, dtype=np.int64)
+
+
+class TopK(Base):
+    """Test cases for the ONNX ``TopK`` operator.
+
+    Expected outputs are computed by :func:`topk_sorted_implementation`; the
+    inline ``# print(...)`` comments record the resulting values for reference.
+    """
+
+    @staticmethod
+    def export_top_k() -> None:
+        """Top 3 along axis 1 with the operator's default largest/sorted behavior."""
+        axis = 1
+        largest = 1
+
+        k = 3
+        node = onnx.helper.make_node(
+            "TopK", inputs=["x", "k"], outputs=["values", "indices"], axis=axis
+        )
+        X = np.array(
+            [
+                [0, 1, 2, 3],
+                [4, 5, 6, 7],
+                [8, 9, 10, 11],
+            ],
+            dtype=np.float32,
+        )
+        K = np.array([k], dtype=np.int64)
+        values_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)
+
+        # print(values_ref)
+        # [[ 3.  2.  1.]
+        # [ 7.  6.  5.]
+        # [11. 10.  9.]]
+        # print(indices_ref)
+        # [[3 2 1]
+        # [3 2 1]
+        # [3 2 1]]
+
+        expect(
+            node, inputs=[X, K], outputs=[values_ref, indices_ref], name="test_top_k"
+        )
+
+    @staticmethod
+    def export_top_k_uint64() -> None:
+        """Same as export_top_k but with uint64 input values."""
+        axis = 1
+        largest = 1
+
+        k = 3
+        node = onnx.helper.make_node(
+            "TopK", inputs=["x", "k"], outputs=["values", "indices"], axis=axis
+        )
+        X = np.array(
+            [
+                [0, 1, 2, 3],
+                [4, 5, 6, 7],
+                [8, 9, 10, 11],
+            ],
+            dtype=np.uint64,
+        )
+        K = np.array([k], dtype=np.int64)
+        values_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)
+
+        # print(values_ref)
+        # [[ 3  2  1]
+        # [ 7  6  5]
+        # [11 10  9]]
+        # print(indices_ref)
+        # [[3 2 1]
+        # [3 2 1]
+        # [3 2 1]]
+
+        expect(
+            node,
+            inputs=[X, K],
+            outputs=[values_ref, indices_ref],
+            name="test_top_k_uint64",
+        )
+
+    @staticmethod
+    def export_top_k_same_values() -> None:
+        """All-equal inputs: indices must follow the ascending tie-break order."""
+        axis = 0
+        # NOTE(review): `largest = 0` is only passed to the reference below; the
+        # node omits the `largest` attribute (ONNX default is 1).  With all
+        # inputs equal, both settings give values [0 0 0] and indices [0 1 2],
+        # so the test is still consistent -- confirm against upstream ONNX.
+        largest = 0
+
+        k = 3
+        node = onnx.helper.make_node(
+            "TopK", inputs=["x", "k"], outputs=["values", "indices"], axis=axis
+        )
+        X = np.array(
+            [0, 0, 0, 0],
+            dtype=np.int64,
+        )
+        K = np.array([k], dtype=np.int64)
+        values_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)
+
+        # (Pdb) print(values_ref)
+        # [0 0 0]
+        # (Pdb) print(indices_ref)
+        # [0 1 2]
+
+        expect(
+            node,
+            inputs=[X, K],
+            outputs=[values_ref, indices_ref],
+            name="test_top_k_same_values",
+        )
+
+    @staticmethod
+    def export_top_k_same_values_largest() -> None:
+        """All-equal inputs with largest=1: tie-break still yields indices [0 1 2]."""
+        axis = 0
+        largest = 1
+
+        k = 3
+        node = onnx.helper.make_node(
+            "TopK", inputs=["x", "k"], outputs=["values", "indices"], axis=axis
+        )
+        X = np.array(
+            [0, 0, 0, 0],
+            dtype=np.int64,
+        )
+        K = np.array([k], dtype=np.int64)
+        values_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)
+
+        # print(values_ref)
+        # [0 0 0]
+        # print(indices_ref)
+        # [0 1 2]
+
+        expect(
+            node,
+            inputs=[X, K],
+            outputs=[values_ref, indices_ref],
+            name="test_top_k_same_values_largest",
+        )
+
+    @staticmethod
+    def export_top_k_same_values_2d() -> None:
+        """2-D input containing duplicate values along the reduced axis."""
+        axis = 1
+        largest = 1
+
+        k = 3
+        node = onnx.helper.make_node(
+            "TopK", inputs=["x", "k"], outputs=["values", "indices"], axis=axis
+        )
+        X = np.array(
+            [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 1, 1]],
+            dtype=np.int64,
+        )
+        K = np.array([k], dtype=np.int64)
+        values_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)
+
+        # print(values_ref)
+        # [[0 0 0]
+        # [1 1 1]
+        # [1 1 2]]
+        # print(indices_ref)
+        # [[0 1 2]
+        # [0 1 2]
+        # [2 3 0]]
+
+        expect(
+            node,
+            inputs=[X, K],
+            outputs=[values_ref, indices_ref],
+            name="test_top_k_same_values_2d",
+        )
+
+    @staticmethod
+    def export_top_k_smallest() -> None:
+        """Smallest 3 along axis 1 (largest=0, sorted=1 set explicitly)."""
+        axis = 1
+        largest = 0
+        sorted = 1  # noqa: A001
+        k = 3
+
+        node = onnx.helper.make_node(
+            "TopK",
+            inputs=["x", "k"],
+            outputs=["values", "indices"],
+            axis=axis,
+            largest=largest,
+            sorted=sorted,
+        )
+
+        X = np.array(
+            [
+                [0, 1, 2, 3],
+                [4, 5, 6, 7],
+                [11, 10, 9, 8],
+            ],
+            dtype=np.float32,
+        )
+        K = np.array([k], dtype=np.int64)
+        values_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)
+
+        # print(values_ref)
+        # [[ 0.  1.  2.]
+        # [ 4.  5.  6.]
+        # [ 8.  9. 10.]]
+        # print(indices_ref)
+        # [[0 1 2]
+        # [0 1 2]
+        # [3 2 1]]
+
+        expect(
+            node,
+            inputs=[X, K],
+            outputs=[values_ref, indices_ref],
+            name="test_top_k_smallest",
+        )
+
+    @staticmethod
+    def export_top_k_negative_axis() -> None:
+        """Top 3 along axis -1 (equivalent to axis 1 for this 2-D input)."""
+        axis = -1
+        largest = 1
+
+        k = 3
+        node = onnx.helper.make_node(
+            "TopK", inputs=["x", "k"], outputs=["values", "indices"], axis=axis
+        )
+        X = np.array(
+            [
+                [0, 1, 2, 3],
+                [4, 5, 6, 7],
+                [8, 9, 10, 11],
+            ],
+            dtype=np.float32,
+        )
+        K = np.array([k], dtype=np.int64)
+        values_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)
+
+        # print(values_ref)
+        # [[ 3.  2.  1.]
+        # [ 7.  6.  5.]
+        # [11. 10.  9.]]
+        # print(indices_ref)
+        # [[3 2 1]
+        # [3 2 1]
+        # [3 2 1]]
+
+        expect(
+            node,
+            inputs=[X, K],
+            outputs=[values_ref, indices_ref],
+            name="test_top_k_negative_axis",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/transpose.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/transpose.py
new file mode 100644
index 0000000000000000000000000000000000000000..95fbdc042eaf59145d64c488ade5aa7f754a8ce8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/transpose.py
@@ -0,0 +1,47 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import itertools
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Transpose(Base):
+    """Test cases for the ONNX ``Transpose`` operator (``np.transpose``)."""
+
+    @staticmethod
+    def export_default() -> None:
+        """Omit ``perm``: axes are reversed, matching np.transpose's default."""
+        shape = (2, 3, 4)
+        data = np.random.random_sample(shape).astype(np.float32)
+
+        node = onnx.helper.make_node(
+            "Transpose", inputs=["data"], outputs=["transposed"]
+        )
+
+        transposed = np.transpose(data)
+        expect(node, inputs=[data], outputs=[transposed], name="test_transpose_default")
+
+    @staticmethod
+    def export_all_permutations() -> None:
+        """Emit one test case per permutation of a rank-3 tensor's axes."""
+        shape = (2, 3, 4)
+        data = np.random.random_sample(shape).astype(np.float32)
+        # All 3! = 6 axis orderings; the index i goes into the test name.
+        permutations = list(itertools.permutations(np.arange(len(shape))))
+
+        for i, permutation in enumerate(permutations):
+            node = onnx.helper.make_node(
+                "Transpose",
+                inputs=["data"],
+                outputs=["transposed"],
+                perm=permutation,
+            )
+            transposed = np.transpose(data, permutation)
+            expect(
+                node,
+                inputs=[data],
+                outputs=[transposed],
+                name=f"test_transpose_all_permutations_{i}",
+            )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/trilu.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/trilu.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9443c3e08edb895475eedccedd9218641cb8aca
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/trilu.py
@@ -0,0 +1,453 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def triu_reference_implementation(x, k=0):
+    """Reference for Trilu(upper=1): zero out elements below the k-th diagonal."""
+    return np.triu(x, k)
+
+
+def tril_reference_implementation(x, k=0):
+    """Reference for Trilu(upper=0): zero out elements above the k-th diagonal."""
+    return np.tril(x, k)
+
+
+class Trilu(Base):
+    """Test cases for the ONNX ``Trilu`` operator.
+
+    ``upper=1`` (default) keeps the upper triangle (``np.triu``); ``upper=0``
+    keeps the lower triangle (``np.tril``).  The optional second input ``k``
+    shifts the diagonal.  Inputs are random; the inline comments show a sample
+    input and the corresponding expected result for that sample.
+    """
+
+    @staticmethod
+    def export_triu() -> None:
+        """Upper triangle of a 4x5 matrix with the default diagonal (k=0)."""
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.random.randint(10, size=(4, 5)).astype(np.int64)
+        # X:
+        #  [[4, 7, 3, 7, 9],
+        #   [1, 2, 8, 6, 9],
+        #   [9, 4, 0, 8, 7],
+        #   [4, 3, 4, 2, 4]]
+        # expect result:
+        #  [[4, 7, 3, 7, 9],
+        #   [0, 2, 8, 6, 9],
+        #   [0, 0, 0, 8, 7],
+        #   [0, 0, 0, 2, 4]]
+        y = triu_reference_implementation(x)
+        expect(node, inputs=[x], outputs=[y], name="test_triu")
+
+    @staticmethod
+    def export_triu_neg() -> None:
+        """Upper triangle shifted one diagonal below the main one (k=-1)."""
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x", "k"],
+            outputs=["y"],
+        )
+
+        x = np.random.randint(10, size=(4, 5)).astype(np.int64)
+        k = np.array(-1).astype(np.int64)
+        # X:
+        #  [[4, 7, 3, 7, 9],
+        #   [1, 2, 8, 6, 9],
+        #   [9, 4, 0, 8, 7],
+        #   [4, 3, 4, 2, 4]]
+        # expect result:
+        #  [[4, 7, 3, 7, 9],
+        #   [1, 2, 8, 6, 9],
+        #   [0, 4, 0, 8, 7],
+        #   [0, 0, 4, 2, 4]]
+        y = triu_reference_implementation(x, int(k))
+        expect(node, inputs=[x, k], outputs=[y], name="test_triu_neg")
+
+    @staticmethod
+    def export_triu_out_neg_out() -> None:
+        """k so negative that the whole matrix is kept (k=-7, below all rows)."""
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x", "k"],
+            outputs=["y"],
+        )
+
+        x = np.random.randint(10, size=(4, 5)).astype(np.int64)
+        k = np.array(-7).astype(np.int64)
+        # X:
+        #  [[4, 7, 3, 7, 9],
+        #   [1, 2, 8, 6, 9],
+        #   [9, 4, 0, 8, 7],
+        #   [4, 3, 4, 2, 4]]
+        # expect result:
+        #  [[4, 7, 3, 7, 9],
+        #   [1, 2, 8, 6, 9],
+        #   [9, 4, 0, 8, 7],
+        #   [4, 3, 4, 2, 4]]
+        y = triu_reference_implementation(x, int(k))
+        expect(node, inputs=[x, k], outputs=[y], name="test_triu_out_neg_out")
+
+    @staticmethod
+    def export_triu_pos() -> None:
+        """Upper triangle starting two diagonals above the main one (k=2)."""
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x", "k"],
+            outputs=["y"],
+        )
+
+        x = np.random.randint(10, size=(4, 5)).astype(np.int64)
+        k = np.array(2).astype(np.int64)
+        # X:
+        #  [[4, 7, 3, 7, 9],
+        #   [1, 2, 8, 6, 9],
+        #   [9, 4, 0, 8, 7],
+        #   [4, 3, 4, 2, 4]]
+        # expect result:
+        #  [[0, 0, 3, 7, 9],
+        #   [0, 0, 0, 6, 9],
+        #   [0, 0, 0, 0, 7],
+        #   [0, 0, 0, 0, 0]]
+        y = triu_reference_implementation(x, int(k))
+        expect(node, inputs=[x, k], outputs=[y], name="test_triu_pos")
+
+    @staticmethod
+    def export_triu_out_pos() -> None:
+        """k so positive that everything is zeroed (k=6, above all columns)."""
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x", "k"],
+            outputs=["y"],
+        )
+
+        x = np.random.randint(10, size=(4, 5)).astype(np.int64)
+        k = np.array(6).astype(np.int64)
+        # X:
+        #  [[4, 7, 3, 7, 9],
+        #   [1, 2, 8, 6, 9],
+        #   [9, 4, 0, 8, 7],
+        #   [4, 3, 4, 2, 4]]
+        # expect result:
+        #  [[0, 0, 0, 0, 0],
+        #   [0, 0, 0, 0, 0],
+        #   [0, 0, 0, 0, 0],
+        #   [0, 0, 0, 0, 0]]
+        y = triu_reference_implementation(x, int(k))
+        expect(node, inputs=[x, k], outputs=[y], name="test_triu_out_pos")
+
+    @staticmethod
+    def export_triu_square() -> None:
+        """Batched input: triu is applied to each 3x3 matrix in the batch."""
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x"],
+            outputs=["y"],
+        )
+
+        x = np.random.randint(10, size=(2, 3, 3)).astype(np.int64)
+        y = triu_reference_implementation(x)
+        # X:
+        # [[[4, 6, 9],
+        #   [7, 5, 4],
+        #   [8, 1, 2]],
+        #
+        #  [[1, 4, 9],
+        #   [9, 6, 3],
+        #   [8, 9, 8]]]
+        # expect result:
+        # [[[4, 6, 9],
+        #   [0, 5, 4],
+        #   [0, 0, 2]],
+        #
+        #  [[1, 4, 9],
+        #   [0, 6, 3],
+        #   [0, 0, 8]]]
+        expect(node, inputs=[x], outputs=[y], name="test_triu_square")
+
+    @staticmethod
+    def export_triu_square_neg() -> None:
+        """Batched square input with a negative diagonal offset (k=-1)."""
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x", "k"],
+            outputs=["y"],
+        )
+
+        x = np.random.randint(10, size=(2, 3, 3)).astype(np.int64)
+        k = np.array(-1).astype(np.int64)
+        # X:
+        # [[[4, 6, 9],
+        #   [7, 5, 4],
+        #   [8, 1, 2]],
+        #
+        #  [[1, 4, 9],
+        #   [9, 6, 3],
+        #   [8, 9, 8]]]
+        # expect result:
+        # [[[4, 6, 9],
+        #   [7, 5, 4],
+        #   [0, 1, 2]],
+        #
+        #  [[1, 4, 9],
+        #   [9, 6, 3],
+        #   [0, 9, 8]]]
+        y = triu_reference_implementation(x, int(k))
+        expect(node, inputs=[x, k], outputs=[y], name="test_triu_square_neg")
+
+    @staticmethod
+    def export_triu_one_row() -> None:
+        """Single-row matrices in a batch: only the first element is zeroed (k=1)."""
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x", "k"],
+            outputs=["y"],
+        )
+
+        x = np.random.randint(10, size=(3, 1, 5)).astype(np.int64)
+        k = np.array(1).astype(np.int64)
+        # X:
+        # [[[1, 4, 9, 7, 1]],
+        #
+        #  [[9, 2, 8, 8, 4]],
+        #
+        #  [[3, 9, 7, 4, 2]]]
+        # expect result:
+        # [[[0, 4, 9, 7, 1]],
+        #
+        #  [[0, 2, 8, 8, 4]],
+        #
+        #  [[0, 9, 7, 4, 2]]]
+        y = triu_reference_implementation(x, int(k))
+        expect(node, inputs=[x, k], outputs=[y], name="test_triu_one_row")
+
+    @staticmethod
+    def export_triu_zero() -> None:
+        """Zero-sized input (0x5): output is empty with the same shape."""
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x", "k"],
+            outputs=["y"],
+        )
+
+        x = np.random.randint(10, size=(0, 5)).astype(np.int64)
+        k = np.array(6).astype(np.int64)
+        # X:
+        # []
+        # expect result:
+        # []
+        y = triu_reference_implementation(x, int(k))
+        expect(node, inputs=[x, k], outputs=[y], name="test_triu_zero")
+
+    @staticmethod
+    def export_tril() -> None:
+        """Lower triangle of a 4x5 matrix with the default diagonal (k=0)."""
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x"],
+            outputs=["y"],
+            upper=0,
+        )
+
+        x = np.random.randint(10, size=(4, 5)).astype(np.int64)
+        # X:
+        #  [[4, 7, 3, 7, 9],
+        #   [1, 2, 8, 6, 9],
+        #   [9, 4, 1, 8, 7],
+        #   [4, 3, 4, 2, 4]]
+        # expect result:
+        #  [[4, 0, 0, 0, 0],
+        #   [1, 2, 0, 0, 0],
+        #   [9, 4, 1, 0, 0],
+        #   [4, 3, 4, 2, 0]]
+        y = tril_reference_implementation(x)
+        expect(node, inputs=[x], outputs=[y], name="test_tril")
+
+    @staticmethod
+    def export_tril_neg() -> None:
+        """Lower triangle ending one diagonal below the main one (k=-1)."""
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x", "k"],
+            outputs=["y"],
+            upper=0,
+        )
+
+        x = np.random.randint(10, size=(4, 5)).astype(np.int64)
+        k = np.array(-1).astype(np.int64)
+        # X:
+        #  [[4, 7, 3, 7, 9],
+        #   [1, 2, 8, 6, 9],
+        #   [9, 4, 1, 8, 7],
+        #   [4, 3, 4, 2, 4]]
+        # expect result:
+        #  [[0, 0, 0, 0, 0],
+        #   [1, 0, 0, 0, 0],
+        #   [9, 4, 0, 0, 0],
+        #   [4, 3, 4, 0, 0]]
+        y = tril_reference_implementation(x, int(k))
+        expect(node, inputs=[x, k], outputs=[y], name="test_tril_neg")
+
+    @staticmethod
+    def export_tril_out_neg() -> None:
+        """k so negative that everything is zeroed (k=-7, below all rows)."""
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x", "k"],
+            outputs=["y"],
+            upper=0,
+        )
+
+        x = np.random.randint(10, size=(4, 5)).astype(np.int64)
+        k = np.array(-7).astype(np.int64)
+        # X:
+        #  [[4, 7, 3, 7, 9],
+        #   [1, 2, 8, 6, 9],
+        #   [9, 4, 1, 8, 7],
+        #   [4, 3, 4, 2, 4]]
+        # expect result:
+        #  [[0, 0, 0, 0, 0],
+        #   [0, 0, 0, 0, 0],
+        #   [0, 0, 0, 0, 0],
+        #   [0, 0, 0, 0, 0]]
+        y = tril_reference_implementation(x, int(k))
+        expect(node, inputs=[x, k], outputs=[y], name="test_tril_out_neg")
+
+    @staticmethod
+    def export_tril_pos() -> None:
+        """Lower triangle extended two diagonals above the main one (k=2)."""
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x", "k"],
+            outputs=["y"],
+            upper=0,
+        )
+
+        x = np.random.randint(10, size=(4, 5)).astype(np.int64)
+        k = np.array(2).astype(np.int64)
+        # X:
+        #  [[4, 7, 3, 7, 9],
+        #   [1, 2, 8, 6, 9],
+        #   [9, 4, 1, 8, 7],
+        #   [4, 3, 4, 2, 4]]
+        # expect result:
+        #  [[4, 7, 3, 0, 0],
+        #   [1, 2, 8, 6, 0],
+        #   [9, 4, 1, 8, 7],
+        #   [4, 3, 4, 2, 4]]
+        y = tril_reference_implementation(x, int(k))
+        expect(node, inputs=[x, k], outputs=[y], name="test_tril_pos")
+
+    @staticmethod
+    def export_tril_out_pos() -> None:
+        """k so positive that the whole matrix is kept (k=6, above all columns)."""
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x", "k"],
+            outputs=["y"],
+            upper=0,
+        )
+        x = np.random.randint(10, size=(4, 5)).astype(np.int64)
+        k = np.array(6).astype(np.int64)
+        # X:
+        #  [[4, 7, 3, 7, 9],
+        #   [1, 2, 8, 6, 9],
+        #   [9, 4, 1, 8, 7],
+        #   [4, 3, 4, 2, 4]]
+        # expect result:
+        #  [[4, 7, 3, 7, 9],
+        #   [1, 2, 8, 6, 9],
+        #   [9, 4, 1, 8, 7],
+        #   [4, 3, 4, 2, 4]]
+        y = tril_reference_implementation(x, int(k))
+        expect(node, inputs=[x, k], outputs=[y], name="test_tril_out_pos")
+
+    @staticmethod
+    def export_tril_square() -> None:
+        """Batched input: tril is applied to each 3x3 matrix in the batch."""
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x"],
+            outputs=["y"],
+            upper=0,
+        )
+
+        x = np.random.randint(10, size=(2, 3, 3)).astype(np.int64)
+        # X:
+        # [[[0, 4, 3],
+        #   [2, 0, 9],
+        #   [8, 2, 5]],
+        #
+        #  [[2, 7, 2],
+        #   [2, 6, 0],
+        #   [2, 6, 5]]]
+        # expect result:
+        # [[[0, 0, 0],
+        #   [2, 0, 0],
+        #   [8, 2, 5]],
+        #
+        #  [[2, 0, 0],
+        #   [2, 6, 0],
+        #   [2, 6, 5]]]
+        y = tril_reference_implementation(x)
+        expect(node, inputs=[x], outputs=[y], name="test_tril_square")
+
+    @staticmethod
+    def export_tril_square_neg() -> None:
+        """Batched square input with a negative diagonal offset (k=-1)."""
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x", "k"],
+            outputs=["y"],
+            upper=0,
+        )
+
+        x = np.random.randint(10, size=(2, 3, 3)).astype(np.int64)
+        k = np.array(-1).astype(np.int64)
+        # X:
+        # [[[0, 4, 3],
+        #   [2, 0, 9],
+        #   [8, 2, 5]],
+        #
+        #  [[2, 7, 2],
+        #   [2, 6, 0],
+        #   [2, 6, 5]]]
+        # expect result:
+        # [[[0, 0, 0],
+        #   [2, 0, 0],
+        #   [8, 2, 0]],
+        #
+        #  [[0, 0, 0],
+        #   [2, 0, 0],
+        #   [2, 6, 0]]]
+        y = tril_reference_implementation(x, int(k))
+        expect(node, inputs=[x, k], outputs=[y], name="test_tril_square_neg")
+
+    @staticmethod
+    def export_tril_one_row() -> None:
+        """Single-row matrices: only the first column survives with k=0.
+
+        NOTE(review): the emitted test name is "test_tril_one_row_neg" even
+        though no negative k is used -- this matches the upstream ONNX test
+        corpus, so it is kept as-is.
+        """
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x"],
+            outputs=["y"],
+            upper=0,
+        )
+
+        x = np.random.randint(10, size=(3, 1, 5)).astype(np.int64)
+        # X:
+        # [[[6, 2, 4, 1, 6]],
+        #
+        #  [[8, 3, 8, 7, 0]],
+        #
+        #  [[2, 2, 9, 5, 9]]]
+        # expect result:
+        # [[[6, 0, 0, 0, 0]],
+        #
+        #  [[8, 0, 0, 0, 0]],
+        #
+        #  [[2, 0, 0, 0, 0]]]
+        y = tril_reference_implementation(x)
+        expect(node, inputs=[x], outputs=[y], name="test_tril_one_row_neg")
+
+    @staticmethod
+    def export_tril_zero() -> None:
+        """Zero-sized input (3x0x5): output is empty with the same shape."""
+        node = onnx.helper.make_node(
+            "Trilu",
+            inputs=["x", "k"],
+            outputs=["y"],
+            upper=0,
+        )
+
+        x = np.random.randint(10, size=(3, 0, 5)).astype(np.int64)
+        k = np.array(6).astype(np.int64)
+        # X:
+        # []
+        # expect result:
+        # []
+        y = tril_reference_implementation(x, int(k))
+        expect(node, inputs=[x, k], outputs=[y], name="test_tril_zero")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/unique.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/unique.py
new file mode 100644
index 0000000000000000000000000000000000000000..2827f57563b71d8762cf4bd736333a1c78943ff5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/unique.py
@@ -0,0 +1,229 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def specify_int64(indices, inverse_indices, counts):
+    """Cast the three np.unique side outputs to int64, as required by ONNX Unique."""
+    return (
+        np.array(indices, dtype=np.int64),
+        np.array(inverse_indices, dtype=np.int64),
+        np.array(counts, dtype=np.int64),
+    )
+
+
+class Unique(Base):
+    """Test cases for the ONNX ``Unique`` operator, built on ``np.unique``.
+
+    Each case emits Y plus the three optional int64 outputs: first-occurrence
+    ``indices``, ``inverse_indices``, and ``counts``.
+    """
+
+    @staticmethod
+    def export_sorted_without_axis() -> None:
+        """Default behavior: flattened input, sorted unique values."""
+        node_sorted = onnx.helper.make_node(
+            "Unique",
+            inputs=["X"],
+            outputs=["Y", "indices", "inverse_indices", "counts"],
+        )
+
+        x = np.array([2.0, 1.0, 1.0, 3.0, 4.0, 3.0], dtype=np.float32)
+        y, indices, inverse_indices, counts = np.unique(x, True, True, True)
+        indices, inverse_indices, counts = specify_int64(
+            indices, inverse_indices, counts
+        )
+        expect(
+            node_sorted,
+            inputs=[x],
+            outputs=[y, indices, inverse_indices, counts],
+            name="test_unique_sorted_without_axis",
+        )
+
+    @staticmethod
+    def export_not_sorted_without_axis() -> None:
+        """sorted=0: unique values must appear in first-occurrence order."""
+        node_not_sorted = onnx.helper.make_node(
+            "Unique",
+            inputs=["X"],
+            outputs=["Y", "indices", "inverse_indices", "counts"],
+            sorted=0,
+        )
+        # numpy unique does not retain original order (it sorts the output unique values)
+        # https://github.com/numpy/numpy/issues/8621
+        # we need to recover unsorted output and indices
+        x = np.array([2.0, 1.0, 1.0, 3.0, 4.0, 3.0], dtype=np.float32)
+        y, indices, inverse_indices, counts = np.unique(x, True, True, True)
+
+        # prepare index mapping from sorted to unsorted
+        argsorted_indices = np.argsort(indices)
+        inverse_indices_map = dict(
+            zip(argsorted_indices, np.arange(len(argsorted_indices)))
+        )
+
+        # Reorder every output into first-occurrence order of the input.
+        indices = indices[argsorted_indices]
+        y = np.take(x, indices, axis=0)
+        inverse_indices = np.asarray(
+            [inverse_indices_map[i] for i in inverse_indices], dtype=np.int64
+        )
+        counts = counts[argsorted_indices]
+        indices, inverse_indices, counts = specify_int64(
+            indices, inverse_indices, counts
+        )
+        # print(y)
+        # [2.0, 1.0, 3.0, 4.0]
+        # print(indices)
+        # [0 1 3 4]
+        # print(inverse_indices)
+        # [0, 1, 1, 2, 3, 2]
+        # print(counts)
+        # [1, 2, 2, 1]
+
+        expect(
+            node_not_sorted,
+            inputs=[x],
+            outputs=[y, indices, inverse_indices, counts],
+            name="test_unique_not_sorted_without_axis",
+        )
+
+    @staticmethod
+    def export_sorted_with_axis() -> None:
+        """Unique rows (axis=0) of a 2-D input, sorted."""
+        node_sorted = onnx.helper.make_node(
+            "Unique",
+            inputs=["X"],
+            outputs=["Y", "indices", "inverse_indices", "counts"],
+            sorted=1,
+            axis=0,
+        )
+
+        x = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]], dtype=np.float32)
+        y, indices, inverse_indices, counts = np.unique(x, True, True, True, axis=0)
+        indices, inverse_indices, counts = specify_int64(
+            indices, inverse_indices, counts
+        )
+        # behavior changed with numpy >= 2.0
+        inverse_indices = inverse_indices.reshape(-1)
+        # print(y)
+        # [[1. 0. 0.]
+        #  [2. 3. 4.]]
+        # print(indices)
+        # [0 2]
+        # print(inverse_indices)
+        # [0 0 1]
+        # print(counts)
+        # [2 1]
+
+        expect(
+            node_sorted,
+            inputs=[x],
+            outputs=[y, indices, inverse_indices, counts],
+            name="test_unique_sorted_with_axis",
+        )
+
+    @staticmethod
+    def export_sorted_with_axis_3d() -> None:
+        """Unique slices along axis=1 of a 3-D input, sorted."""
+        node_sorted = onnx.helper.make_node(
+            "Unique",
+            inputs=["X"],
+            outputs=["Y", "indices", "inverse_indices", "counts"],
+            sorted=1,
+            axis=1,
+        )
+
+        x = np.array(
+            [
+                [[1.0, 1.0], [0.0, 1.0], [2.0, 1.0], [0.0, 1.0]],
+                [[1.0, 1.0], [0.0, 1.0], [2.0, 1.0], [0.0, 1.0]],
+            ],
+            dtype=np.float32,
+        )
+        y, indices, inverse_indices, counts = np.unique(x, True, True, True, axis=1)
+        indices, inverse_indices, counts = specify_int64(
+            indices, inverse_indices, counts
+        )
+        # behavior changed with numpy >= 2.0
+        inverse_indices = inverse_indices.reshape(-1)
+        # print(y)
+        # [[[0. 1.]
+        #  [1. 1.]
+        #  [2. 1.]]
+        # [[0. 1.]
+        #  [1. 1.]
+        #  [2. 1.]]]
+        # print(indices)
+        # [1 0 2]
+        # print(inverse_indices)
+        # [1 0 2 0]
+        # print(counts)
+        # [2 1 1]
+        expect(
+            node_sorted,
+            inputs=[x],
+            outputs=[y, indices, inverse_indices, counts],
+            name="test_unique_sorted_with_axis_3d",
+        )
+
+    @staticmethod
+    def export_sorted_with_negative_axis() -> None:
+        """Unique columns via a negative axis (axis=-1), sorted."""
+        node_sorted = onnx.helper.make_node(
+            "Unique",
+            inputs=["X"],
+            outputs=["Y", "indices", "inverse_indices", "counts"],
+            sorted=1,
+            axis=-1,
+        )
+
+        x = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 3]], dtype=np.float32)
+        y, indices, inverse_indices, counts = np.unique(x, True, True, True, axis=-1)
+        indices, inverse_indices, counts = specify_int64(
+            indices, inverse_indices, counts
+        )
+        # behavior changed with numpy >= 2.0
+        inverse_indices = inverse_indices.reshape(-1)
+        # print(y)
+        # [[0. 1.]
+        #  [0. 1.]
+        #  [3. 2.]]
+        # print(indices)
+        # [1 0]
+        # print(inverse_indices)
+        # [1 0 0]
+        # print(counts)
+        # [2 1]
+
+        expect(
+            node_sorted,
+            inputs=[x],
+            outputs=[y, indices, inverse_indices, counts],
+            name="test_unique_sorted_with_negative_axis",
+        )
+
+    @staticmethod
+    def export_length_1() -> None:
+        """Edge case: a single-element input."""
+        node_sorted = onnx.helper.make_node(
+            "Unique",
+            inputs=["X"],
+            outputs=["Y", "indices", "inverse_indices", "counts"],
+            sorted=1,
+        )
+
+        x = np.array([0], dtype=np.int64)
+        y, indices, inverse_indices, counts = np.unique(x, True, True, True)
+        indices, inverse_indices, counts = specify_int64(
+            indices, inverse_indices, counts
+        )
+        # behavior changed with numpy >= 2.0
+        inverse_indices = inverse_indices.reshape(-1)
+        # print(y)
+        # [0]
+        # print(indices)
+        # [0]
+        # print(inverse_indices)
+        # [0]
+        # print(counts)
+        # [1]
+
+        expect(
+            node_sorted,
+            inputs=[x],
+            outputs=[y, indices, inverse_indices, counts],
+            name="test_unique_length_1",
+        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/unsqueeze.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/unsqueeze.py
new file mode 100644
index 0000000000000000000000000000000000000000..d52284486740c69014ab70fd196ad84c261c3cfb
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/unsqueeze.py
@@ -0,0 +1,91 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+class Unsqueeze(Base):
+    """Test cases for the ONNX ``Unsqueeze`` operator (``np.expand_dims``)."""
+
+    @staticmethod
+    def export_unsqueeze_one_axis() -> None:
+        """Insert a single size-1 dimension at every possible axis in turn."""
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+
+        for i in range(x.ndim):
+            axes = np.array([i]).astype(np.int64)
+            node = onnx.helper.make_node(
+                "Unsqueeze",
+                inputs=["x", "axes"],
+                outputs=["y"],
+            )
+            y = np.expand_dims(x, axis=i)
+
+            expect(
+                node,
+                inputs=[x, axes],
+                outputs=[y],
+                name="test_unsqueeze_axis_" + str(i),
+            )
+
+    @staticmethod
+    def export_unsqueeze_two_axes() -> None:
+        """Insert size-1 dimensions at axes 1 and 4 of the output shape."""
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        axes = np.array([1, 4]).astype(np.int64)
+
+        node = onnx.helper.make_node(
+            "Unsqueeze",
+            inputs=["x", "axes"],
+            outputs=["y"],
+        )
+        # Axes refer to the output tensor, so expand in ascending order.
+        y = np.expand_dims(x, axis=1)
+        y = np.expand_dims(y, axis=4)
+
+        expect(node, inputs=[x, axes], outputs=[y], name="test_unsqueeze_two_axes")
+
+    @staticmethod
+    def export_unsqueeze_three_axes() -> None:
+        """Insert size-1 dimensions at axes 2, 4 and 5 of the output shape."""
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        axes = np.array([2, 4, 5]).astype(np.int64)
+
+        node = onnx.helper.make_node(
+            "Unsqueeze",
+            inputs=["x", "axes"],
+            outputs=["y"],
+        )
+        y = np.expand_dims(x, axis=2)
+        y = np.expand_dims(y, axis=4)
+        y = np.expand_dims(y, axis=5)
+
+        expect(node, inputs=[x, axes], outputs=[y], name="test_unsqueeze_three_axes")
+
+    @staticmethod
+    def export_unsqueeze_unsorted_axes() -> None:
+        """Axes given in descending order must produce the same result."""
+        x = np.random.randn(3, 4, 5).astype(np.float32)
+        axes = np.array([5, 4, 2]).astype(np.int64)
+
+        node = onnx.helper.make_node(
+            "Unsqueeze",
+            inputs=["x", "axes"],
+            outputs=["y"],
+        )
+        # Reference result is order-independent: expand ascending as before.
+        y = np.expand_dims(x, axis=2)
+        y = np.expand_dims(y, axis=4)
+        y = np.expand_dims(y, axis=5)
+
+        expect(node, inputs=[x, axes], outputs=[y], name="test_unsqueeze_unsorted_axes")
+
+    @staticmethod
+    def export_unsqueeze_negative_axes() -> None:
+        """A negative axis counts from the end of the output shape."""
+        node = onnx.helper.make_node(
+            "Unsqueeze",
+            inputs=["x", "axes"],
+            outputs=["y"],
+        )
+        x = np.random.randn(1, 3, 1, 5).astype(np.float32)
+        axes = np.array([-2]).astype(np.int64)
+        y = np.expand_dims(x, axis=-2)
+        expect(node, inputs=[x, axes], outputs=[y], name="test_unsqueeze_negative_axes")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/upsample.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/upsample.py
new file mode 100644
index 0000000000000000000000000000000000000000..99e9d5f3f10ca1e3a7d447698f671bca48dc1587
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/upsample.py
@@ -0,0 +1,58 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx import helper
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Upsample(Base):
    """Test-case generator for the deprecated Upsample operator (opset 9)."""

    @staticmethod
    def export_nearest() -> None:
        """Nearest-neighbor upsampling of a 1x1x2x2 tensor by 2x along H, 3x along W."""
        node = onnx.helper.make_node(
            "Upsample",
            inputs=["X", "scales"],
            outputs=["Y"],
            mode="nearest",
        )

        data = np.array([1, 2, 3, 4], dtype=np.float32).reshape(1, 1, 2, 2)

        # No scaling on the batch/channel dims; 2x on height, 3x on width.
        scales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)

        # Nearest mode replicates each element 2 times along H and 3 along W,
        # which is exactly what repeat() produces.
        output = data.repeat(2, axis=2).repeat(3, axis=3)

        expect(
            node,
            inputs=[data, scales],
            outputs=[output],
            name="test_upsample_nearest",
            opset_imports=[helper.make_opsetid("", 9)],
        )
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/where.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/where.py
new file mode 100644
index 0000000000000000000000000000000000000000..87ec32f481113fac1784acee9997245e2d53eb2d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/node/where.py
@@ -0,0 +1,42 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import numpy as np
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
class Where(Base):
    """Test-case generators for the Where operator (elementwise select)."""

    @staticmethod
    def _make_case(dtype, test_name: str) -> None:
        """Build and export one Where testcase for the given element dtype."""
        node = onnx.helper.make_node(
            "Where",
            inputs=["condition", "x", "y"],
            outputs=["z"],
        )

        condition = np.array([[1, 0], [1, 1]], dtype=bool)
        x = np.array([[1, 2], [3, 4]], dtype=dtype)
        y = np.array([[9, 8], [7, 6]], dtype=dtype)
        # Picks x where condition is True, y elsewhere: [[1, 8], [3, 4]].
        z = np.where(condition, x, y)
        expect(node, inputs=[condition, x, y], outputs=[z], name=test_name)

    @staticmethod
    def export() -> None:
        """Where over float32 operands."""
        Where._make_case(np.float32, "test_where_example")

    @staticmethod
    def export_long() -> None:
        """Where over int64 operands."""
        Where._make_case(np.int64, "test_where_long_example")
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/test_case.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/test_case.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ef94220f105c30442b4c248dc0fae6d19488642
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/test_case.py
@@ -0,0 +1,29 @@
+# Copyright (c) ONNX Project Contributors
+
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Sequence
+
+    import numpy as np
+
+    import onnx
+
+
@dataclass
class TestCase:
    """A single backend test case plus the location of its reference data.

    Downloadable ("real") cases are described by ``url``/``model_name``;
    generated cases carry the ``model`` proto and its ``data_sets`` in memory
    (see ``cmd_tools.generate_data``), with the unused fields left as None.
    """

    name: str  # unique testcase name, e.g. "test_where_example"
    model_name: str  # model identifier (used e.g. when recording "real" metadata)
    url: str | None  # download location for "real" models; None for generated cases
    model_dir: str | None  # local directory of a downloaded model, if any
    model: onnx.ModelProto | None  # in-memory model for generated cases
    data_sets: Sequence[tuple[Sequence[np.ndarray], Sequence[np.ndarray]]] | None
    kind: str  # testcase category, e.g. "node" or "real"
    rtol: float  # relative tolerance used when comparing outputs
    atol: float  # absolute tolerance used when comparing outputs
    # Tell PyTest this isn't a real test.
    __test__: bool = False
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/utils.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff754be4333d625d4ae5923eeff949c05735e670
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/case/utils.py
@@ -0,0 +1,45 @@
+# Copyright (c) ONNX Project Contributors
+
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import importlib
+import pkgutil
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+from onnx import ONNX_ML
+
+if TYPE_CHECKING:
+    from types import ModuleType
+
# Every numeric NumPy scalar type exercised by the dtype-parameterized
# test-case generators: signed/unsigned integers plus IEEE floats.
all_numeric_dtypes = [
    *(np.int8, np.int16, np.int32, np.int64),
    *(np.uint8, np.uint16, np.uint32, np.uint64),
    *(np.float16, np.float32, np.float64),
]
+
+
def import_recursive(package: ModuleType) -> None:
    """Import every module and subpackage found beneath *package*.

    The ai_onnx_ml node cases are skipped when this onnx build has ML
    support disabled (``ONNX_ML`` is false).
    """
    prefix = package.__name__
    for _loader, child, is_package in pkgutil.iter_modules(package.__path__):
        full_name = f"{prefix}.{child}"  # fully-qualified module/package name
        if not ONNX_ML and full_name.startswith(
            "onnx.backend.test.case.node.ai_onnx_ml"
        ):
            continue

        child_module = importlib.import_module(full_name)
        if is_package:
            import_recursive(child_module)
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/cmd_tools.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/cmd_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b004bb28e825a2e90760c460bbaed031c51e924
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/cmd_tools.py
@@ -0,0 +1,190 @@
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import argparse
+import json
+import os
+import shutil
+import warnings
+
+import onnx.backend.test.case.model as model_test
+import onnx.backend.test.case.node as node_test
+from onnx import ONNX_ML, TensorProto, numpy_helper
+
# Directory containing this file; generated test data lands in its "data" subfolder.
TOP_DIR = os.path.realpath(os.path.dirname(__file__))
DATA_DIR = os.path.join(TOP_DIR, "data")
+
+
def _serialize_value(value, value_info) -> bytes:
    """Serialize one test input/output to bytes according to the declared
    type of the matching graph ``value_info`` (map, sequence, optional,
    or tensor)."""
    value_type = value_info.type
    if value_type.HasField("map_type"):
        return numpy_helper.from_dict(value, value_info.name).SerializeToString()
    if value_type.HasField("sequence_type"):
        return numpy_helper.from_list(value, value_info.name).SerializeToString()
    if value_type.HasField("optional_type"):
        return numpy_helper.from_optional(value, value_info.name).SerializeToString()
    assert value_type.HasField("tensor_type")
    if isinstance(value, TensorProto):
        # Already a serialized-ready proto: write it through unchanged.
        return value.SerializeToString()
    return numpy_helper.from_array(value, value_info.name).SerializeToString()


def generate_data(args: argparse.Namespace) -> None:
    """Generate test data (model.onnx plus input_*/output_* protos) for all
    collected model/node testcases under ``args.output``.

    Honors ``args.clean`` (wipe existing node dirs first), ``args.op_type``
    (restrict node cases to one operator) and ``args.diff`` (only cases whose
    source files changed relative to the main branch).
    """

    def prepare_dir(path: str) -> None:
        # Recreate the directory from scratch so stale files never survive.
        if os.path.exists(path):
            shutil.rmtree(path)
        os.makedirs(path)

    # Count the pre-existing entries under <output>/node so we can warn later
    # if the regenerated set does not line up with what was already there.
    # Fixes vs. the previous version: guard against a missing directory
    # (os.listdir would raise FileNotFoundError), and join each name onto
    # node_root — a bare os.path.isfile(name) tested paths relative to the
    # current working directory instead.
    node_root = os.path.join(args.output, "node")
    if os.path.exists(node_root):
        original_dir_number = sum(
            1
            for name in os.listdir(node_root)
            if os.path.isfile(os.path.join(node_root, name))
        )
    else:
        original_dir_number = 0

    # Clean the output directory before generating data for node testcases.
    # It is used to check new generated data is correct in CIs.
    if args.clean and os.path.exists(node_root):
        for sub_dir in os.listdir(node_root):
            if ONNX_ML or not sub_dir.startswith("test_ai_onnx_ml_"):
                shutil.rmtree(os.path.join(node_root, sub_dir))

    cases = model_test.collect_testcases()
    # If op_type is specified, only include testcases for that operator;
    # otherwise include everything (or only the diffed cases with --diff).
    if args.diff:
        cases += node_test.collect_diff_testcases()
    else:
        cases += node_test.collect_testcases(args.op_type)

    node_number = 0
    for case in cases:
        output_dir = os.path.join(args.output, case.kind, case.name)
        prepare_dir(output_dir)
        if case.kind == "node":
            node_number += 1
        if case.kind == "real":
            # "Real" models are downloaded at test time; only record metadata.
            with open(os.path.join(output_dir, "data.json"), "w") as fi:
                json.dump(
                    {
                        "url": case.url,
                        "model_name": case.model_name,
                        "rtol": case.rtol,
                        "atol": case.atol,
                    },
                    fi,
                    sort_keys=True,
                )
        else:
            assert case.model
            with open(os.path.join(output_dir, "model.onnx"), "wb") as f:
                f.write(case.model.SerializeToString())
            assert case.data_sets
            for i, (inputs, outputs) in enumerate(case.data_sets):
                data_set_dir = os.path.join(output_dir, f"test_data_set_{i}")
                prepare_dir(data_set_dir)
                graph = case.model.graph
                for j, value in enumerate(inputs):
                    with open(os.path.join(data_set_dir, f"input_{j}.pb"), "wb") as f:
                        f.write(_serialize_value(value, graph.input[j]))
                for j, value in enumerate(outputs):
                    with open(os.path.join(data_set_dir, f"output_{j}.pb"), "wb") as f:
                        f.write(_serialize_value(value, graph.output[j]))

    if not args.clean and node_number != original_dir_number:
        # Typo fix: the message previously read "cannot not be generated".
        warnings.warn(
            "There are some models under 'onnx/backend/test/data/node' which cannot"
            " be generated by the script from 'onnx/backend/test/case/node'. Please add"
            " '--clean' option for 'python onnx/backend/test/cmd_tools.py generate-data'"
            " to cleanup the existing directories and regenerate them.",
            Warning,
            stacklevel=2,
        )
+
+
def parse_args() -> argparse.Namespace:
    """Build the backend-test-tools CLI and parse sys.argv.

    A single subcommand, ``generate-data``, is exposed; its handler is
    stored on the ``func`` attribute of the returned namespace.
    """
    parser = argparse.ArgumentParser("backend-test-tools")
    commands = parser.add_subparsers()

    gen = commands.add_parser("generate-data", help="convert testcases to test data.")
    gen.add_argument(
        "-c",
        "--clean",
        default=False,
        action="store_true",
        help="Clean the output directory before generating data for node testcases.",
    )
    gen.add_argument(
        "-o",
        "--output",
        default=DATA_DIR,
        help="output directory (default: %(default)s)",
    )
    gen.add_argument(
        "-t",
        "--op_type",
        default=None,
        help="op_type for test case generation. (generates test data for the specified op_type only.)",
    )
    gen.add_argument(
        "-d",
        "--diff",
        default=False,
        action="store_true",
        help="only generates test data for those changed files (compared to the main branch).",
    )
    gen.set_defaults(func=generate_data)

    return parser.parse_args()
+
+
def main() -> None:
    """CLI entry point: parse arguments and invoke the selected subcommand."""
    parsed = parse_args()
    parsed.func(parsed)


if __name__ == "__main__":
    main()
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/light/README.md b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/light/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..787c0a3dcc213a92da75b5109335ec0dd3338a66
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/light/README.md
@@ -0,0 +1,16 @@
+
+
+# Light models
+
The models in this folder were created by replacing
all float initializers with `ConstantOfShape` nodes,
using the function `replace_initializer_by_constant_of_shape`.
The resulting models are much smaller, which makes them
suitable for inclusion in the repository for unit testing.
+
+Expected outputs were obtained by using CReferenceEvaluator
+implemented in [PR 4952](https://github.com/onnx/onnx/pull/4952).
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/light/light_shufflenet.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/light/light_shufflenet.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..8f30725f663d75936a44895ee3a109fe2f1afedd
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/light/light_shufflenet.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6f406d62be36d6b4572542c0950a2abd59f56237068793290680bba89fbafe5
+size 67666
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_abs/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_abs/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..17cf800c1d362a6f799e3688fc709d82790798fc
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_abs/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87e8c85a3c5ac0401781fd1327a04a8eec7b8fac8df7e813b2bed5134403905a
+size 97
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_acos_example/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_acos_example/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..24436e906b16a136fa7ae42ea5ffd7be13846344
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_acos_example/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aaaff584d11aa92b677a4a71899beb469126bf2b6c576f2be1d61566db52454f
+size 91
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_acos_example/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_acos_example/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..88128780e03bf763149dd948c9cf0d40a0bb2071
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_acos_example/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:703de0c3be505a78596235789682bc4e6f0e8523f34fb731207b4c076701abdd
+size 21
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_acosh/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_acosh/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..843cfd0470938ceca1fe0082b5fc285329ded0f9
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_acosh/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:30ab226b20d339612014c98f4f0a3997297293bf8cb50ae15bce3d15fd42e3ee
+size 254
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_acosh/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_acosh/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1140447391371b7966891cc5f151a2c1f19f66ae
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_acosh/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1692a5be4affd722d2c82d4e26eea97fcffeb85e067e75acb1cbbdeb1fa456a7
+size 254
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_acosh_example/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_acosh_example/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..331c57656769664831c1d43b37e65acfd5f34f50
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_acosh_example/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b3de92cda4a410cf189bfbdde29176f95b3919a2042ea3c80e32800e3cb3c7f
+size 21
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..c142651f24437eda02f83341d454134c05f7848f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5bfd337a65cfc3213420fa06841eb88a6d3b8cfbef6235099fc4950f2c94bfe8
+size 320
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..815b7a1a21c44a7787c0caa8b319d07fdf87b418
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c44a418da7d4f82602975579ba1cdda8fdab51f0606a3ee096c9314ea0c68be9
+size 15
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/output_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/output_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..b5cf06076d1b02eb6e21c7267863b641f48a2960
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad/test_data_set_0/output_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fbdda18e2257329a378bc02490c04a7e853fe88f3e6107adb9eee5cec9509de8
+size 17
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..815b7a1a21c44a7787c0caa8b319d07fdf87b418
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c44a418da7d4f82602975579ba1cdda8fdab51f0606a3ee096c9314ea0c68be9
+size 15
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f6a713c716572c10cc6d3e57576acba905bbb76d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd209068326e49f8d5f3d94e2a5fdef0502f69874383a52b1f69f87b2902f5b2
+size 14
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_5.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_5.pb
new file mode 100644
index 0000000000000000000000000000000000000000..4830c09b2e19ed128f9b46d183fb4f0cac7617e1
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/input_5.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2d086bc46c15bc8ad8096e4457d699ca662af4f34932b5222e4e6640921e356
+size 18
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..85899ba850eb5e7a7dbeebef654daad1ea73f72a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50a4c8438339f3fe9ba7e2c16a039efdde15470ae2b3b2056779e2d1b5a2565f
+size 18
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d84927a014148a95c49fc043a6a01a88559f258f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a95550f2372bf749abcc0d7785d5d55f5833308e582a6d6df79d1c9d3811522
+size 22
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7715d3216f1e5dc83ba096539a7ebbec29cad13c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adagrad_multiple/test_data_set_0/output_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2db9fc09b516918d14c7090643afbfdaaeb946be33e1393aaf099253f57db23
+size 22
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..815b7a1a21c44a7787c0caa8b319d07fdf87b418
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c44a418da7d4f82602975579ba1cdda8fdab51f0606a3ee096c9314ea0c68be9
+size 15
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..64f0d05b50f671719de7bf339ed3ba6538e56c09
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aba5acc9dbec722e5924d8aa0de8f52aad7b4334b7c4e3d612bbae2c8828a2b4
+size 17
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d61b5b6ad90be73b7598b1872073503a017c0633
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b13087d18cf644a39bb101a66dd06a37ff95bac00e83bfbd5ff621b5168aa22
+size 11
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3646541999dfac3811ce5c1e8f259daf8a269bf3
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03ee3416024db9c3cd15bfd692c6cccb467e13f10857b445d033fdd24a9b6c2e
+size 18
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_4.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..831514ad9ab87825c9a6415cd84764819fc16f13
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b179919a8dec2bc0f85af8c06809d3a2de75c963317d0e06c684e23022ac770
+size 14
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_5.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_5.pb
new file mode 100644
index 0000000000000000000000000000000000000000..4830c09b2e19ed128f9b46d183fb4f0cac7617e1
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_adam_multiple/test_data_set_0/input_5.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2d086bc46c15bc8ad8096e4457d699ca662af4f34932b5222e4e6640921e356
+size 18
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..58f5af5f3237bae3e4133f0888f56b1ed0292c51
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0180f5665325e343077736e4903fa43513ae714ceb131deee45b842370df289a
+size 254
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..58f5af5f3237bae3e4133f0888f56b1ed0292c51
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0180f5665325e343077736e4903fa43513ae714ceb131deee45b842370df289a
+size 254
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3af3fa0a61a4b0e8f02553740690e4abb5091689
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_bcast/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d89fd14b53313d11325cc455b8f17a927a1465c9e3cf8d4a08fa0b4dca8cc1b
+size 29
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_int16/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_int16/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..433c868ccdbc8eb13379046b9b57957b4df81953
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_int16/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e663c6a9f3f17dd4a2cfb784031d0dee26c9b2d1c0d4bed1753bee85b3fe268
+size 135
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_int8/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_int8/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d28e82e73b49a1876137bf6d0db83cb04a9c00c8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_int8/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a028af280a811bf07e1a1a3cdf14335fc87c810e1479060c448b63831c856bd
+size 73
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_int8/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_int8/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e2cf403c368d46df37ed36886257a9559c41bc80
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_int8/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6dcbf725da0114c4655a5841b00b6e8786f0b58a8b725cdbe8d477d49ec47a25
+size 75
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_uint32/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_uint32/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3e7eddeb415033f52a8f75ab3c6c88596d77a082
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_uint32/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9ccb546c67b962cbe8db1b9e9a2657f63d4615087f2718fbeedab02828f5bd1
+size 254
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_uint64/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_uint64/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d319c83d7e2c457900beaa9692d51d3f49805a3d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_add_uint64/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c03dac19128bd9e29996a1f132287e2cdb0b6c009672a86e28412b6442908573
+size 494
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..301c9ceed2255173ddf8e120069692cc16e6e112
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4d865924e9378c3f4176c6398ba20ea50490a2dafa95d132e60d88303b16286
+size 65
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..301c9ceed2255173ddf8e120069692cc16e6e112
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_align_corners_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4d865924e9378c3f4176c6398ba20ea50490a2dafa95d132e60d88303b16286
+size 65
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..301c9ceed2255173ddf8e120069692cc16e6e112
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_2d_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4d865924e9378c3f4176c6398ba20ea50490a2dafa95d132e60d88303b16286
+size 65
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a67c7a5c7ec585084cb0fc8b5a1fceeba9607b2f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5454147c5fbc725c4cacee5ced9780869b3acdf30f2aa9c36b6a27e3019ca3b
+size 2901
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..6000e249c1c27664a68748ccbf026c32c89233c6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f287f38024fd6edefa31c26a2d16c6ebe4552c39ff16ede52cce5fbc842f9579
+size 52
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..bd45857eb7b0fe652cea745f1c65424022e2aa0e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_align_corners_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cacd57330a8a4014fcc20fa73508932e7d1ba90f1c3522157086fe448219d5bd
+size 2901
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..bb09e13fa8c27d4271697941a1c9c0bbad4f2d3d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18a2a1254591393360a8c17404137abb58d6c67813524bdf9aaf75c52c3cba31
+size 113
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a67c7a5c7ec585084cb0fc8b5a1fceeba9607b2f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_affine_grid_3d_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5454147c5fbc725c4cacee5ced9780869b3acdf30f2aa9c36b6a27e3019ca3b
+size 2901
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..5be9e71c259c9879477655e81e5f6caf0382d6e5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_array_feature_extractor/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c8d34aa16af6242a5fda1ca7a2f1adc81ff76c2f2848fd8e7b277f30161914d
+size 59
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..bc6d1527aa4765d4ccc07781230587a782ef89a1
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:306233d2bb7af438846aa85214ace58b5e65e15845cfdf50ff2b069f29c682ff
+size 164
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c0d75ec3ce0d9e8cb40e8a38407cf70e350a5cc9
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_binarizer/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20788723269b9582f339e281df6fa80f0da06611b90685eba3a86ba68b1d7a0b
+size 254
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int_no_default/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int_no_default/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..da5e4e1ba8c3e421ef492c2d13428f8da9aabbe1
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int_no_default/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b443af123faaf82e5b0cfa7e4d14dccc679f58b38f312688f4eb2d408a157ae8
+size 210
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int_no_default/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int_no_default/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7603cc1cd32cfe9d1ea9c0f73e3f0890cbf6a9a7
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_string_int_no_default/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87d63ddcaec19922f25a5680f7b650abbc2fe1bc0bf6c197609c1538c726ee00
+size 49
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..7191f897265ac9a0e8a3f4b8e9efcf28643841c9
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd7882fd3f02e3fb582557eeebf718609df2323ae640cb44c1c2896db7b22175
+size 288
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e99f0a5224e76c5e89617f8750157765f76b35b3
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10845137c90b1da0d41afc32971d464dc95f367431665a8b6df89f8cc5eada72
+size 22
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a676ffd27ad524607aa3412aaa0628a5a2d4d7f0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_label_encoder_tensor_mapping/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c726942a470f21886be3834bacddbf861ddb18a9fc317ac95b2a51b2763bc25f
+size 19
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..0cc8b30d6e8f1f26a7dc07ca713b99c6dd67f01e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_set_membership/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6e35d89c68474042317f73a30fa4377a5605018edb8c033f9ec672d52dbc5d2
+size 656
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..9693128e2d60ea06dd8cfcc62637a02ffb946dd8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_ai_onnx_ml_tree_ensemble_single_tree/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7dc8998cc56877d2f743778b93a8b84ed97e0a070d67146d2d4d234ca9508c46
+size 59
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and2d/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and2d/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..6d26947c1a76452030b54f44a2557a4e13a6c283
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and2d/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:828666c5e8221193e34535d271489053d7e9c4f9dd17812a019cbca1e4e86bc9
+size 23
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and2d/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and2d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..4ff4772731fcd64f790af6d7d82b991c8d9f4537
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and2d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19c8e670cd77c1d5c046f80512e6413d6f7696ab28dbee6c40c989888668bd62
+size 25
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and3d/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and3d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..706737f3a71c33491b50789dfce99fccf1165e6e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and3d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ebe24e5da6936a11a5a616c3145f0fcf6e9ed0a04a6a917bf806876ab7bccc4
+size 73
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and3d/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and3d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f5bc0251f6d1e62133c6f9a9a8a9c1c168fcca5b
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and3d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8290c29ebb7039165f7df70dce27416edc98d9a4d156270096fb2ba958163bf
+size 75
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and4d/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and4d/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..796b256da73a7ea57b96ba38db5ff2ffa68109fc
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and4d/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c9d9ae60379d225562bd427491fe4083d7a344e60a9e8a682c45e2df7401201
+size 143
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..dcfbf19893fccf53099ed6f9934bba5d898cef6a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0c2b153980d431ea6bfcf593a4995d1009d12ef5ff04844f8c66ea1635b71e9
+size 376
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..93222ffb4f73c7c802ba73fdbeda35f38c2d9b9a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f49694bd75517ff3584d1c185ad3d8b1b4a82890f03973d9b91060158ee960f7
+size 376
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..496d93cb79632812d51b6efb710e0364c03f3a2f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and4d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef3e8372a5383d80f83411b48418c1a10b67f40014d475e8190a7f3fa1d7e596
+size 378
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v1d/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v1d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7eb42d8bfbf0fef3253d1a5887eedcdc5e631ae5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast3v1d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db36f9707b9d18748e58e15c8e030142d4d125ee3c85b95dd76f1a43685b0b2f
+size 75
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..ceb57d97d131d52c4a239c92fc974b48156e1507
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dba80b60d23d790112a8acd6cf1d73ac4f180c308d22f7c013fd26fc5a1e4f71
+size 133
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8f9331042c399009948b63ee25637adf55c86a0a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_and_bcast4v3d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b89dcaf3ca790713e70faa595e6aaf14bad80e4a652fe2ba04084b1f23cbbba
+size 378
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..2c90d3ccde168e6cbe9303be6aea56eb968084ef
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa2a106adb032b3d9c74314015fe24565b7fad3561104493d88a28cff668fcb7
+size 157
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random_select_last_index/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random_select_last_index/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..fdfb5c7880f859c004e99b16c2461a4a8f747169
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random_select_last_index/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f03266416470e0d9a16e064d15e99f6434b627582518daef6941407a38f532d9
+size 201
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random_select_last_index/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random_select_last_index/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..52a6484ec995b58c020e2be22fc1996e8fd68f85
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_default_axis_random_select_last_index/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e119a1e3b3c495f0eb9f18ccb523bb34f913a84b0bca2a6112db08f8e116955
+size 114
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..aaad4419111e51ef4bb367937a9637a6598cc848
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a222bc115eab44665d54922b089d2ff4857259b5adaa0669c3e31b598836a1f0
+size 30
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c79ff510c4a7f3e3b3b736e9fc9eb297f63a39d7
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e05d3e3d1557470cc4296d74740066f50ce42cdcd5691a0c9182e2ec2bd2a670
+size 32
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example_select_last_index/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example_select_last_index/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..18607d72b989e820bfa4b8a7e1874f5af48e4a69
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_example_select_last_index/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37987711b1000dd60a939702d5794c1ecc69a163d70d809bb084db01dc349bcc
+size 203
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..afd4c1c8caa46b3bfc0ce9aa7048cac491a38ff0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ad2d309385710d7fa583dcb5f674e699d8501d75c2818088204f8400d7bf3c3
+size 166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..6198d9df26a052dc7272c6155dbc7f0685229f94
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c712948f59cf972ea8677d436f56654fbee7d9630e73ddcf84696cfebe3a3722
+size 82
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..257bf7a169f022a2f754c7b93e053eb127dba3cf
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c64f168361054c05707d7cbf0c02ca85d63e3021eb7505bef5f1231c3353a02d
+size 210
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..6198d9df26a052dc7272c6155dbc7f0685229f94
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_keepdims_random_select_last_index/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c712948f59cf972ea8677d436f56654fbee7d9630e73ddcf84696cfebe3a3722
+size 82
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..3d7fde82008ce337e30a4e88e021524b8f8fc719
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e01bd3b80e010db5ab252fa67d51edd9b16a9e12e059f9fc1d126f724d3ae1ca
+size 182
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..aaad4419111e51ef4bb367937a9637a6598cc848
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a222bc115eab44665d54922b089d2ff4857259b5adaa0669c3e31b598836a1f0
+size 30
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c79ff510c4a7f3e3b3b736e9fc9eb297f63a39d7
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e05d3e3d1557470cc4296d74740066f50ce42cdcd5691a0c9182e2ec2bd2a670
+size 32
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example_select_last_index/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example_select_last_index/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..aaad4419111e51ef4bb367937a9637a6598cc848
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_example_select_last_index/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a222bc115eab44665d54922b089d2ff4857259b5adaa0669c3e31b598836a1f0
+size 30
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random_select_last_index/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random_select_last_index/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random_select_last_index/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random_select_last_index/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random_select_last_index/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1039b1fbdf2cad0285ba2946646897c925909de0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_negative_axis_keepdims_random_select_last_index/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aebd74b532e085c2e4322df494c197fb2c5493f6018baaca2fc9b75678edb222
+size 66
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..aaad4419111e51ef4bb367937a9637a6598cc848
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a222bc115eab44665d54922b089d2ff4857259b5adaa0669c3e31b598836a1f0
+size 30
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2db76e3c5577ed696fae8150a0ada9f67845974d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:539ef4826803f8528dad7f8be46e8738858e4ad4c4f3e169722cbe07ae3a9145
+size 30
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example_select_last_index/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example_select_last_index/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..6bd3e0b54b777e675793297bb8e1f9da1b01ddfb
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_example_select_last_index/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abfa73be6ec1f45d3a0066830a6fb2a5227b88e3b8934f12a8a667216c67e585
+size 30
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..a654e26812e09d5f9bf212769efd62907a590106
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68cf1d54c3c91350c8dd4a7e25590726f54f463ffb516eee353c65cbd8fe6951
+size 165
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random_select_last_index/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random_select_last_index/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..797edfc74634bdff04f1e4ac677528df91505faf
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random_select_last_index/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea687052cf2cecfcd3325e199b6d94342c00423461305f776bbbbe061a945685
+size 209
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random_select_last_index/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random_select_last_index/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmax_no_keepdims_random_select_last_index/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random_select_last_index/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random_select_last_index/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_default_axis_random_select_last_index/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random_select_last_index/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random_select_last_index/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_keepdims_random_select_last_index/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3660190fdb924678baf00602cfc5f2eda8d8df68
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fb2e68eb1b9128cd443cf8306d0e36a2efc14507305b70759830f5b1296ea62
+size 30
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..ac5bab847db10f055a3ecc4ceba5d56f14065d82
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:434c81deb13de3351c760a1989eba7c344d9c876082afecf1affbfcc83460f03
+size 32
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example_select_last_index/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example_select_last_index/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..4d855e3601a3ae885f0961e15ea9ff5c23df416e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_negative_axis_keepdims_example_select_last_index/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9eef17eef1618fdef2120b9187f980e4188c6f289432e4b0446474ed3925a9e4
+size 226
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example_select_last_index/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example_select_last_index/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..aeba3dd7342bc9abb733e0d14e1cd172b989e453
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_example_select_last_index/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2b4e8b6d9535c99bbd9502eb88e19e169b931db334676b77ef5771c07e573ae
+size 30
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..436d73a79959451fdcc5e6fb9c7dc561c303b178
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d64e6d5f85f7e8ed85a3dfb8f2a16c463dde007daa2d252d4b66a5321df684a3
+size 80
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random_select_last_index/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random_select_last_index/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4c01d80dbae2686e0971795fcf142530bdba388
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random_select_last_index/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bff015de9bf3d8c0c608e9f38ef7e3f85bdb6ab2ee7187b724013747f6b35e35
+size 112
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random_select_last_index/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random_select_last_index/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..436d73a79959451fdcc5e6fb9c7dc561c303b178
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_argmin_no_keepdims_random_select_last_index/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d64e6d5f85f7e8ed85a3dfb8f2a16c463dde007daa2d252d4b66a5321df684a3
+size 80
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asin/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asin/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..d677a2ec7cb57253cbade21989fc95864007ed3c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asin/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:588ee1b78578a8fbe67a67c324db645045b1492dba57b30d211b232661a8b16a
+size 99
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asin/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asin/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..691aa143f8af9be7b79e4cf0a81853feb1fa8d0c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asin/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05c023929a1b32b4bdbd7882ff5e53b9b53762ee2929c66be76d0f0ba7ca393d
+size 254
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asin_example/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asin_example/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..721ef45b24499df9725ea659c338543e41d889b4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asin_example/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:165943d2cf4746f85aacd3c1a17504b3368893dd4c47bf53cf4b353b0cd7a22f
+size 21
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asinh/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asinh/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..05b85d466d525a51dafee7d03f50609d9b8aa2db
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asinh/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:478650c3468b3a05d52a5c01839686f1a38660c86359d80cd00312a4c3213e1a
+size 101
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asinh/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asinh/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..58f5af5f3237bae3e4133f0888f56b1ed0292c51
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asinh/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0180f5665325e343077736e4903fa43513ae714ceb131deee45b842370df289a
+size 254
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asinh_example/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asinh_example/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e54897184bb7b348bb72d44576fa6b73ef20e6a9
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_asinh_example/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f05cfad2b4770c8c438309b0ec431a934f03f72b7433a05bf69b176c7d69f24c
+size 21
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_atan/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_atan/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..58f5af5f3237bae3e4133f0888f56b1ed0292c51
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_atan/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0180f5665325e343077736e4903fa43513ae714ceb131deee45b842370df289a
+size 254
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_atan_example/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_atan_example/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..f169449542d035bff03df85cbec49401a9b26372
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_atan_example/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a25204314bc85740c4bb9f43b6149f69d59fb0e9b4b483c719c99479f4ffcbd1
+size 91
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_atanh/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_atanh/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..691aa143f8af9be7b79e4cf0a81853feb1fa8d0c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_atanh/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05c023929a1b32b4bdbd7882ff5e53b9b53762ee2929c66be76d0f0ba7ca393d
+size 254
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_atanh/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_atanh/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f7caaf2894d24fc405b6888749e53683a32964bc
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_atanh/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d79ee91541caf9cf3cae12fe4448290bbfd99ff68b654e8a52464b5cf1df5061
+size 254
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_atanh_example/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_atanh_example/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..88128780e03bf763149dd948c9cf0d40a0bb2071
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_atanh_example/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:703de0c3be505a78596235789682bc4e6f0e8523f34fb731207b4c076701abdd
+size 21
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3962fc0739a74d8335a082b77b963c30dd0d163f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47e5bd8508926df02a86bbcde245328816417554bf71c70773f476cc366344
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f89a6e9c008afc5a5630bd4b34d6de663f72d433
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7290085ee514c0639af5142c407f579cf2f78b0af359dc77b9f2c134fa93b7cc
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3962fc0739a74d8335a082b77b963c30dd0d163f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47e5bd8508926df02a86bbcde245328816417554bf71c70773f476cc366344
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e8075977e8115d2ea4f312f657275da7c0a9f799
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:292e8058e6de4ba5aed2fb6fdf71ab9c86482c8ac8eaed6f0e97f782d1e0775c
+size 115
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask_expanded/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask_expanded/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e8075977e8115d2ea4f312f657275da7c0a9f799
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask_expanded/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:292e8058e6de4ba5aed2fb6fdf71ab9c86482c8ac8eaed6f0e97f782d1e0775c
+size 115
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..25a0888e51cf24e98c03228977195a9460a78d5d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_attn_mask_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d70967b27a4d9cd78fe32edd19f09a80b30d3e312b7c69444019fc11a05dfce
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2b7ac354a455fe60b729f3596bacb9ca2a2fe249
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9774f35584c8321c9624da85f853ed9574c6dbe70abb74f241df7773610e8eb2
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..0e8570d6d40ed902d6c2e0ed130765ff82d3785f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f874e7268a2c89d78205201d18bac4af225e064a343564979a5fa80c55a2cc4d
+size 11650
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3962fc0739a74d8335a082b77b963c30dd0d163f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_causal_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47e5bd8508926df02a86bbcde245328816417554bf71c70773f476cc366344
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..fd4722a396f7d05940c87ac5fc1986b23e547296
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f069f70e4f7af876bdbb12334cb2444279160fee0fad6d4a627950a3391f6231
+size 227
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4d8fd86b528301e8eda11d4c5ea840fc454d731
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3b5ccdd15e425b5ab582ab0d455bc81ebc15682e7716dfa8ac69b52c45fe155
+size 1454
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_attn_mask/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_attn_mask/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_attn_mask/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_attn_mask/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_attn_mask/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..ad1e12f291d32592572b8b0c4790cde2c45cac6f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_attn_mask/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2f09f9c90356f2778d05cf61a24177d19f7586e3e756bf8deb40fb34779749f
+size 115
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_attn_mask_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_attn_mask_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..41959c9ec0309e1858f83ef8aa7ce53fcf047a98
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_attn_mask_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6279c45c16ae25c6fa61c4dae969fee5b74cd96fea0c1f6df7aa4ff722021b67
+size 15097
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4d8fd86b528301e8eda11d4c5ea840fc454d731
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3b5ccdd15e425b5ab582ab0d455bc81ebc15682e7716dfa8ac69b52c45fe155
+size 1454
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4d8fd86b528301e8eda11d4c5ea840fc454d731
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3b5ccdd15e425b5ab582ab0d455bc81ebc15682e7716dfa8ac69b52c45fe155
+size 1454
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..3e892f627b7554b07764e6c1a456c3ddd67437b3
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5a937e4575c0cf7c6ecf9d7565759721ba705e23dfab19d0a4d994ae41b77cd
+size 14219
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8850d4483083505d73031bc62c054ba0cae90415
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_causal_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bace1e2fa041548d20741a3b02a12cedc12528c9e3d27701dfe8214f75936ab
+size 974
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..112a91b3447325073d600b1dbbd1f5500d8300c7
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:629f7c87dc610ffcd0f04fc2bfdbcd401c98a2fd1ee1ac4a858700099b3736f5
+size 12344
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4d8fd86b528301e8eda11d4c5ea840fc454d731
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3b5ccdd15e425b5ab582ab0d455bc81ebc15682e7716dfa8ac69b52c45fe155
+size 1454
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..b3a81386f139c99fca1f4420f8692a66d2c7f65c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c44a7b749cb0b4e413d3cc14ff846da217f56d367adfbb9ce34af24aa99533ff
+size 974
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4d8fd86b528301e8eda11d4c5ea840fc454d731
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3b5ccdd15e425b5ab582ab0d455bc81ebc15682e7716dfa8ac69b52c45fe155
+size 1454
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..98d8bf88f4a1004bb0ca1083777bed5ea52586a2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81f31d3df7ec4b22ec6fa24e1f952f522226581b851a00cf941edcf97dc1c8e7
+size 974
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..ebef29eb236c53d6c8476e7c2acd4565d68666cd
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31bc5f159dce18e34714ff8feec4120dbf3c4191ea229b5518a81cf261048e29
+size 13322
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4d8fd86b528301e8eda11d4c5ea840fc454d731
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3b5ccdd15e425b5ab582ab0d455bc81ebc15682e7716dfa8ac69b52c45fe155
+size 1454
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..43645895cd3aae970cf46929139dcece1833e452
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d915a6cc6fdfcd75bcfb2352c0a24f24d0201846c35da06b2da310061a71372
+size 254
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4d8fd86b528301e8eda11d4c5ea840fc454d731
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3b5ccdd15e425b5ab582ab0d455bc81ebc15682e7716dfa8ac69b52c45fe155
+size 1454
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8520d4c4db2848522b20c398b016631dfce52c4a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2047b55aeabbda923cdb56fb1086389c6e49aab8d41b74f1fe1ead3743903d53
+size 974
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_sizes_softcap_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..c4d8fd86b528301e8eda11d4c5ea840fc454d731
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3b5ccdd15e425b5ab582ab0d455bc81ebc15682e7716dfa8ac69b52c45fe155
+size 1454
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8e15e614f96447933eb363a163bc721afafa848f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fc90b9c0ae38a94a9de91a825b035664e41929cb06724d9fc90264bac48155c
+size 308
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present/test_data_set_0/output_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present/test_data_set_0/output_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..6fb45e19bf10e0b1a2140b2672ae86a5afafa6ea
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present/test_data_set_0/output_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a996ed282a09c6f0c709d813174f8d5c9332ef8c74ffac2e1c0fe13faa71480f
+size 3482
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present/test_data_set_0/output_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present/test_data_set_0/output_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f9dd9a8b3363ca45c0f4ff64142023cefb24fe85
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present/test_data_set_0/output_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fb0df02cc517d32cf7666accc9b586b3d870beee3ce7deb7ad3990a9635d437
+size 4348
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..f9a9a65df02c60661a118b1837237b6ff3a4c04a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5eb611a7d83fcf88cbc1468d8559c6af6673ffd54c0b45871c644d6d6d1721f0
+size 16471
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8e15e614f96447933eb363a163bc721afafa848f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fc90b9c0ae38a94a9de91a825b035664e41929cb06724d9fc90264bac48155c
+size 308
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_4.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..ef2f1f18f8dd414a894a6cb6c2f0063d2545030e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8c94f5ac40f39f0bee0bdc8fe4e8d4d977d7e987d1f5505d189971214ed66b2
+size 2327
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_5.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_5.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0d7622f6b7b0470fe94eec0688eeae7c5c047029
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_5.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26d3bf33cccd19c2153a3c0a31298eaaa391fe429933eb2067f560ebc2ad8896
+size 2905
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/output_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/output_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..6fb45e19bf10e0b1a2140b2672ae86a5afafa6ea
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_diff_heads_with_past_and_present_expanded/test_data_set_0/output_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a996ed282a09c6f0c709d813174f8d5c9332ef8c74ffac2e1c0fe13faa71480f
+size 3482
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3962fc0739a74d8335a082b77b963c30dd0d163f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47e5bd8508926df02a86bbcde245328816417554bf71c70773f476cc366344
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f89a6e9c008afc5a5630bd4b34d6de663f72d433
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7290085ee514c0639af5142c407f579cf2f78b0af359dc77b9f2c134fa93b7cc
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..06b4733207624ce0fbb8b4fbb5aa1a39cccfaf0e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1cd3cc648809473b680937ed3c79e8579409b430de70916429fdd6a85c627dc1
+size 2318
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d2b2d53c6c2c8923d5bffc9ea566fd45ea226071
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9209a69aa8decac428ed991b8f999d1ab8f74a67e0b6aa96fc3dfceba456c746
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..4e51838e8908e5d8fe5d5c6d62af3ce848f6cbbe
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52e46e4f6396f85209821870b59c7d8e147eeeab97d84705bf0f41f585f6e7bb
+size 2318
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_attn_mask/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_attn_mask/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..72654b35ac86fb6bf9877c5433d63acf630b20bc
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_attn_mask/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b5c825e1b8bd5a85d5bedd9c6a632db3eae3c6ee6f61d7d0b2fab0e43ce06a1
+size 115
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_attn_mask_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_attn_mask_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a73612dffc6d29af68072ef97e44fb0104acddb4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_attn_mask_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efc90f71a9ce6dc64b6b993235191d676f11d418896812d9b0c0c6ea035e9128
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_attn_mask_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_attn_mask_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d2b2d53c6c2c8923d5bffc9ea566fd45ea226071
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_attn_mask_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9209a69aa8decac428ed991b8f999d1ab8f74a67e0b6aa96fc3dfceba456c746
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_attn_mask_expanded/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_attn_mask_expanded/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..72654b35ac86fb6bf9877c5433d63acf630b20bc
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_attn_mask_expanded/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b5c825e1b8bd5a85d5bedd9c6a632db3eae3c6ee6f61d7d0b2fab0e43ce06a1
+size 115
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..06b4733207624ce0fbb8b4fbb5aa1a39cccfaf0e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1cd3cc648809473b680937ed3c79e8579409b430de70916429fdd6a85c627dc1
+size 2318
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a73612dffc6d29af68072ef97e44fb0104acddb4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efc90f71a9ce6dc64b6b993235191d676f11d418896812d9b0c0c6ea035e9128
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1fc174c836ccc43912c10aa6730912a3e766bada
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:329e6da0f2c8cc69ae0ee49070e0cdacf1e6c34fc393dc4e22b18bdb81d544ce
+size 2318
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..84c6cf03aec5282efd1bdd331d4361ff728ddc2e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff7e41a15a8e4351f7607fb12a87908b97928ef4c564925bdedc0e33cbcd586d
+size 12254
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..06b4733207624ce0fbb8b4fbb5aa1a39cccfaf0e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1cd3cc648809473b680937ed3c79e8579409b430de70916429fdd6a85c627dc1
+size 2318
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a73612dffc6d29af68072ef97e44fb0104acddb4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efc90f71a9ce6dc64b6b993235191d676f11d418896812d9b0c0c6ea035e9128
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d2b2d53c6c2c8923d5bffc9ea566fd45ea226071
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_causal_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9209a69aa8decac428ed991b8f999d1ab8f74a67e0b6aa96fc3dfceba456c746
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a73612dffc6d29af68072ef97e44fb0104acddb4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efc90f71a9ce6dc64b6b993235191d676f11d418896812d9b0c0c6ea035e9128
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d2b2d53c6c2c8923d5bffc9ea566fd45ea226071
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9209a69aa8decac428ed991b8f999d1ab8f74a67e0b6aa96fc3dfceba456c746
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..4e51838e8908e5d8fe5d5c6d62af3ce848f6cbbe
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52e46e4f6396f85209821870b59c7d8e147eeeab97d84705bf0f41f585f6e7bb
+size 2318
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a73612dffc6d29af68072ef97e44fb0104acddb4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efc90f71a9ce6dc64b6b993235191d676f11d418896812d9b0c0c6ea035e9128
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d2b2d53c6c2c8923d5bffc9ea566fd45ea226071
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9209a69aa8decac428ed991b8f999d1ab8f74a67e0b6aa96fc3dfceba456c746
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f50b388d31ba454329e99170717eae08be501be6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:980a7b11c4f6c759580c489d355dc2b0cb89febcb7820e9ba4ea2be53adfd058
+size 2318
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..572f8d100dffdf624c21f35555a00e5df49e1632
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b37d56e03913b9465d3975afa37c8162f7db8165f8c877ff7a17033496ec2967
+size 11487
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..06b4733207624ce0fbb8b4fbb5aa1a39cccfaf0e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1cd3cc648809473b680937ed3c79e8579409b430de70916429fdd6a85c627dc1
+size 2318
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d2b2d53c6c2c8923d5bffc9ea566fd45ea226071
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9209a69aa8decac428ed991b8f999d1ab8f74a67e0b6aa96fc3dfceba456c746
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f50b388d31ba454329e99170717eae08be501be6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_scaled_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:980a7b11c4f6c759580c489d355dc2b0cb89febcb7820e9ba4ea2be53adfd058
+size 2318
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a73612dffc6d29af68072ef97e44fb0104acddb4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efc90f71a9ce6dc64b6b993235191d676f11d418896812d9b0c0c6ea035e9128
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d2b2d53c6c2c8923d5bffc9ea566fd45ea226071
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9209a69aa8decac428ed991b8f999d1ab8f74a67e0b6aa96fc3dfceba456c746
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..cfffbbe1a640c3605f58c8343972dfb579fe151c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3cf924b764ff745a2478894fa43bf4851d38b80c9e995e4f4044049f6f8a0ec3
+size 2318
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..06b4733207624ce0fbb8b4fbb5aa1a39cccfaf0e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1cd3cc648809473b680937ed3c79e8579409b430de70916429fdd6a85c627dc1
+size 2318
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a73612dffc6d29af68072ef97e44fb0104acddb4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efc90f71a9ce6dc64b6b993235191d676f11d418896812d9b0c0c6ea035e9128
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d2b2d53c6c2c8923d5bffc9ea566fd45ea226071
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9209a69aa8decac428ed991b8f999d1ab8f74a67e0b6aa96fc3dfceba456c746
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..cfffbbe1a640c3605f58c8343972dfb579fe151c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_softcap_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3cf924b764ff745a2478894fa43bf4851d38b80c9e995e4f4044049f6f8a0ec3
+size 2318
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d2b2d53c6c2c8923d5bffc9ea566fd45ea226071
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9209a69aa8decac428ed991b8f999d1ab8f74a67e0b6aa96fc3dfceba456c746
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f3b6939ce90b0df2701b6afad3942cbebf311aa5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fec3de4b82ef735a98f0caea56a7ab35f0f3fc522e30b78e58c3ae95144fcace
+size 308
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present/test_data_set_0/output_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present/test_data_set_0/output_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..b5705df5d82509d938104a3a12b22cfefb007519
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present/test_data_set_0/output_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef8d9eb6c1f29e6629d003312ba613e977b30b96d29006cf79a6d27adc0c3dcd
+size 3482
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d2b2d53c6c2c8923d5bffc9ea566fd45ea226071
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9209a69aa8decac428ed991b8f999d1ab8f74a67e0b6aa96fc3dfceba456c746
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present_expanded/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present_expanded/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f3b6939ce90b0df2701b6afad3942cbebf311aa5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present_expanded/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fec3de4b82ef735a98f0caea56a7ab35f0f3fc522e30b78e58c3ae95144fcace
+size 308
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f8c677bcc08cb5918e9e64891ab515ec98f2a145
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad3a58963238830223bc48cc11b4bf24cffc92847fd61158376476bc6fea5ce7
+size 2318
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present_expanded/test_data_set_0/output_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present_expanded/test_data_set_0/output_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..b5705df5d82509d938104a3a12b22cfefb007519
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present_expanded/test_data_set_0/output_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef8d9eb6c1f29e6629d003312ba613e977b30b96d29006cf79a6d27adc0c3dcd
+size 3482
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present_expanded/test_data_set_0/output_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present_expanded/test_data_set_0/output_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e8d10b760942f258b11323ff7e6c27f6dc68c5bb
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_gqa_with_past_and_present_expanded/test_data_set_0/output_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:590eadcc746dfa49ac11d49481c5cc69b32d3820849bde6f7a3f2337f2ec5057
+size 3484
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..1b48b8d9fa2583f0e6afb029a2786461d6ccc004
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:089ab08c8e31927eba93a08a99b129008088000be69772c2fef6128330124bd3
+size 234
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3962fc0739a74d8335a082b77b963c30dd0d163f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47e5bd8508926df02a86bbcde245328816417554bf71c70773f476cc366344
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..fe8cd5d6efc40e518026baf82e3bd93228adb471
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6bfa103a6db319aad88671eccf2b40e33f43b9127881f2a954a81c90981bea5
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..7de48db4a90520ad3a0b438b72537f197ac50a49
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75cbda7ff10ba6a01f7daf01da152670f1c144aeb346aa5602786490b148507d
+size 10923
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3962fc0739a74d8335a082b77b963c30dd0d163f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47e5bd8508926df02a86bbcde245328816417554bf71c70773f476cc366344
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..fe8cd5d6efc40e518026baf82e3bd93228adb471
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_scaled_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6bfa103a6db319aad88671eccf2b40e33f43b9127881f2a954a81c90981bea5
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..df4636c373a5a245c97dacb7e2ab790fba3e7249
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a14512118f895f1d80e6d824af1703317abc8853e7e328be76b4e82d6a3d422
+size 237
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3962fc0739a74d8335a082b77b963c30dd0d163f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47e5bd8508926df02a86bbcde245328816417554bf71c70773f476cc366344
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8fbd0bc2474bacdb36159897afbaee269cf050f1
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ebc62e043a6b44b95c2b597ed3d0e9077d4b1e65e252b6dd3a2cb2a5602930f
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..639df2b2e89233735f970ebd46f3e798ba9c2b1d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb641af4b5ccafb86fa35c41ef8e05419a98c6fe77c0c9c0e7f8d838865f7ba9
+size 11733
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3962fc0739a74d8335a082b77b963c30dd0d163f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47e5bd8508926df02a86bbcde245328816417554bf71c70773f476cc366344
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8fbd0bc2474bacdb36159897afbaee269cf050f1
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_softcap_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ebc62e043a6b44b95c2b597ed3d0e9077d4b1e65e252b6dd3a2cb2a5602930f
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..9cb15f5aae0b213f5f078ece4b17778ec03c1aae
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c1d757189f62ce7d3c2ddfd95833cd1a87f9aa9617782557389986d6b1612eb
+size 233
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..88f359ac140c6663213798826b54b34ea8c5f5bd
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:638727056bbf9438d17877b492fa84eafdd0fe96e134d1c84097e0bbddecc202
+size 109
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a935870716a55b3e53d98a246c3da03070925ee1
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fac242d990cbbda861ef7507332f3eb9bc4ed5d34cd41b071f7e0473d7c5f320
+size 109
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..88f359ac140c6663213798826b54b34ea8c5f5bd
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:638727056bbf9438d17877b492fa84eafdd0fe96e134d1c84097e0bbddecc202
+size 109
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..dbda1e905bccdc2c4bbbdd9f487ca74ec4cc636b
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15746c4412641c3f796a04637e6b57a2faac6ad7018d51b65ac8816d98fe4405
+size 109
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..ac74e7b98b815bfd330ab81fa05f5bbe3ad26676
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:006e82629cf79f27fd5eea9ee23bfa90924b56aa7850fdd69a9c4abbbe6185e0
+size 109
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a935870716a55b3e53d98a246c3da03070925ee1
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_transpose_verification_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fac242d990cbbda861ef7507332f3eb9bc4ed5d34cd41b071f7e0473d7c5f320
+size 109
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3962fc0739a74d8335a082b77b963c30dd0d163f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47e5bd8508926df02a86bbcde245328816417554bf71c70773f476cc366344
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1cd277d8f36c46ba0c7135ad54ef44a20833376a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fc6d28acaef37bb9f455b6320183cd10b3bd07ac4b7298d265a5cf1fa818e0a
+size 308
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_4.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7238eec4d5c7f590935f3ce5c5874022d0ecd61e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b48ecb41bd80052eb1ed78aaeaed699105b0e45ac9e39c906fc818446e630b55
+size 2327
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_5.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_5.pb
new file mode 100644
index 0000000000000000000000000000000000000000..50658387f7577b3b3505db25ee27f5fbad0407e2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/input_5.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04cba949f0303a74abad7f5f2ce8cd1ec58d9fb43afdfac86cc850002bf19f15
+size 2329
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8003a933926dd585a6cbc086eddceeab28588051
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c1265d7d9e6379f41e03470a3eadc197cc3dde6294c395b9ef76af7752bb38c
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/output_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/output_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d72e1ece138065bde23c01c624b91cab9c6b8d10
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/output_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58c5adacffb5b5ee1f5129295e398f4186d728b455750c5eb6017528452dbcac
+size 3482
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/output_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/output_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..07194bb36ccdfd95d8a560241770d46bf2b29662
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present/test_data_set_0/output_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a8cc233fa23cc9440c86b6af8ed681597d2659a723897c29b629ee8ba66b929
+size 3484
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..dbde416232ceb2d7bc0481564a4b733e7193f008
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bb6282b2958344e691264d91f15d48b84fd22bde641e07cbaaac2fc731bb57a
+size 14737
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3962fc0739a74d8335a082b77b963c30dd0d163f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47e5bd8508926df02a86bbcde245328816417554bf71c70773f476cc366344
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/input_4.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7238eec4d5c7f590935f3ce5c5874022d0ecd61e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b48ecb41bd80052eb1ed78aaeaed699105b0e45ac9e39c906fc818446e630b55
+size 2327
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/input_5.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/input_5.pb
new file mode 100644
index 0000000000000000000000000000000000000000..50658387f7577b3b3505db25ee27f5fbad0407e2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/input_5.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04cba949f0303a74abad7f5f2ce8cd1ec58d9fb43afdfac86cc850002bf19f15
+size 2329
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8003a933926dd585a6cbc086eddceeab28588051
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c1265d7d9e6379f41e03470a3eadc197cc3dde6294c395b9ef76af7752bb38c
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/output_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/output_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d72e1ece138065bde23c01c624b91cab9c6b8d10
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/output_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58c5adacffb5b5ee1f5129295e398f4186d728b455750c5eb6017528452dbcac
+size 3482
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/output_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/output_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..07194bb36ccdfd95d8a560241770d46bf2b29662
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_expanded/test_data_set_0/output_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a8cc233fa23cc9440c86b6af8ed681597d2659a723897c29b629ee8ba66b929
+size 3484
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..c02b675db325fe99aa683a2b260d06ebe61c4e99
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66ed4bb0d83e3e41b66c2b8c2ee256b5224233376867064bc5118e37bdbb0e0d
+size 549
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3962fc0739a74d8335a082b77b963c30dd0d163f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47e5bd8508926df02a86bbcde245328816417554bf71c70773f476cc366344
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1cd277d8f36c46ba0c7135ad54ef44a20833376a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fc6d28acaef37bb9f455b6320183cd10b3bd07ac4b7298d265a5cf1fa818e0a
+size 308
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/output_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/output_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..07194bb36ccdfd95d8a560241770d46bf2b29662
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/output_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a8cc233fa23cc9440c86b6af8ed681597d2659a723897c29b629ee8ba66b929
+size 3484
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/output_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/output_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..4a91b4f044637179ddc34486df8fa5e43a3f94d6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul/test_data_set_0/output_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17426ed9ffd78df7de75375c470d4e96df9b0b7517ddebe742e53f378682b64f
+size 1759
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..c61a788bd8b64dba4360d905edd11cb3015f0355
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3da2faf3347ebeec1ddb24b09f3a609c1deada3766318f39cb834d1210d9d36
+size 584
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1cd277d8f36c46ba0c7135ad54ef44a20833376a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fc6d28acaef37bb9f455b6320183cd10b3bd07ac4b7298d265a5cf1fa818e0a
+size 308
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/input_4.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7238eec4d5c7f590935f3ce5c5874022d0ecd61e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b48ecb41bd80052eb1ed78aaeaed699105b0e45ac9e39c906fc818446e630b55
+size 2327
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/input_5.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/input_5.pb
new file mode 100644
index 0000000000000000000000000000000000000000..50658387f7577b3b3505db25ee27f5fbad0407e2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/input_5.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04cba949f0303a74abad7f5f2ce8cd1ec58d9fb43afdfac86cc850002bf19f15
+size 2329
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8003a933926dd585a6cbc086eddceeab28588051
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c1265d7d9e6379f41e03470a3eadc197cc3dde6294c395b9ef76af7752bb38c
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3962fc0739a74d8335a082b77b963c30dd0d163f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47e5bd8508926df02a86bbcde245328816417554bf71c70773f476cc366344
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias_expanded/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias_expanded/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1cd277d8f36c46ba0c7135ad54ef44a20833376a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias_expanded/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fc6d28acaef37bb9f455b6320183cd10b3bd07ac4b7298d265a5cf1fa818e0a
+size 308
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias_expanded/test_data_set_0/input_4.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias_expanded/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7238eec4d5c7f590935f3ce5c5874022d0ecd61e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_bias_expanded/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b48ecb41bd80052eb1ed78aaeaed699105b0e45ac9e39c906fc818446e630b55
+size 2327
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3962fc0739a74d8335a082b77b963c30dd0d163f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47e5bd8508926df02a86bbcde245328816417554bf71c70773f476cc366344
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1cd277d8f36c46ba0c7135ad54ef44a20833376a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fc6d28acaef37bb9f455b6320183cd10b3bd07ac4b7298d265a5cf1fa818e0a
+size 308
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_4.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7238eec4d5c7f590935f3ce5c5874022d0ecd61e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b48ecb41bd80052eb1ed78aaeaed699105b0e45ac9e39c906fc818446e630b55
+size 2327
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_5.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_5.pb
new file mode 100644
index 0000000000000000000000000000000000000000..50658387f7577b3b3505db25ee27f5fbad0407e2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/input_5.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04cba949f0303a74abad7f5f2ce8cd1ec58d9fb43afdfac86cc850002bf19f15
+size 2329
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/output_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/output_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d72e1ece138065bde23c01c624b91cab9c6b8d10
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/output_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58c5adacffb5b5ee1f5129295e398f4186d728b455750c5eb6017528452dbcac
+size 3482
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/output_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/output_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..07194bb36ccdfd95d8a560241770d46bf2b29662
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/output_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a8cc233fa23cc9440c86b6af8ed681597d2659a723897c29b629ee8ba66b929
+size 3484
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/output_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/output_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..4a91b4f044637179ddc34486df8fa5e43a3f94d6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_expanded/test_data_set_0/output_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17426ed9ffd78df7de75375c470d4e96df9b0b7517ddebe742e53f378682b64f
+size 1759
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3962fc0739a74d8335a082b77b963c30dd0d163f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47e5bd8508926df02a86bbcde245328816417554bf71c70773f476cc366344
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1cd277d8f36c46ba0c7135ad54ef44a20833376a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fc6d28acaef37bb9f455b6320183cd10b3bd07ac4b7298d265a5cf1fa818e0a
+size 308
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap/test_data_set_0/input_4.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7238eec4d5c7f590935f3ce5c5874022d0ecd61e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b48ecb41bd80052eb1ed78aaeaed699105b0e45ac9e39c906fc818446e630b55
+size 2327
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap/test_data_set_0/output_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap/test_data_set_0/output_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d72e1ece138065bde23c01c624b91cab9c6b8d10
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap/test_data_set_0/output_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58c5adacffb5b5ee1f5129295e398f4186d728b455750c5eb6017528452dbcac
+size 3482
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3962fc0739a74d8335a082b77b963c30dd0d163f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47e5bd8508926df02a86bbcde245328816417554bf71c70773f476cc366344
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1cd277d8f36c46ba0c7135ad54ef44a20833376a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fc6d28acaef37bb9f455b6320183cd10b3bd07ac4b7298d265a5cf1fa818e0a
+size 308
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_4.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7238eec4d5c7f590935f3ce5c5874022d0ecd61e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b48ecb41bd80052eb1ed78aaeaed699105b0e45ac9e39c906fc818446e630b55
+size 2327
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_5.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_5.pb
new file mode 100644
index 0000000000000000000000000000000000000000..50658387f7577b3b3505db25ee27f5fbad0407e2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/input_5.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04cba949f0303a74abad7f5f2ce8cd1ec58d9fb43afdfac86cc850002bf19f15
+size 2329
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/output_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/output_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d72e1ece138065bde23c01c624b91cab9c6b8d10
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/output_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58c5adacffb5b5ee1f5129295e398f4186d728b455750c5eb6017528452dbcac
+size 3482
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/output_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/output_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..07194bb36ccdfd95d8a560241770d46bf2b29662
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/output_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a8cc233fa23cc9440c86b6af8ed681597d2659a723897c29b629ee8ba66b929
+size 3484
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/output_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/output_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..b52ba987b532ddd61fe40c16096d02a361dba074
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softcap_expanded/test_data_set_0/output_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:84d0f87c1756f926fb17a29d5314d6c7d453dd2a93f02cbadfd05ec219f0aed2
+size 1759
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3962fc0739a74d8335a082b77b963c30dd0d163f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47e5bd8508926df02a86bbcde245328816417554bf71c70773f476cc366344
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1cd277d8f36c46ba0c7135ad54ef44a20833376a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fc6d28acaef37bb9f455b6320183cd10b3bd07ac4b7298d265a5cf1fa818e0a
+size 308
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/output_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/output_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d72e1ece138065bde23c01c624b91cab9c6b8d10
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/output_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58c5adacffb5b5ee1f5129295e398f4186d728b455750c5eb6017528452dbcac
+size 3482
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/output_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/output_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..09dec32bd0a9255b19fe2f275e30a549ee6846ec
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax/test_data_set_0/output_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca65e2ad9c7b829b8a0888d2b6934f557fbb8e5a1a8701d6246a3e83a751597e
+size 1759
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..13941c42797276ed8e46afcad519f1f987c51b69
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3db16a49da35cca4cda201162d268d99695afd0ce58455dbfcdb7de78d33c776
+size 17756
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee3f2e07e5d832dfb72a7fcd48f8741922f551
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faf58c310b96057411bea33b700192a38516cd7909ba8d876b6f2d622120d2bf
+size 782
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..95a57f53a319137cb128964127375191fc40e627
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ce754d0a1e90dcd4656fcfbad045bfee9c15b2b5dc35200e59276a20eea5641
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3962fc0739a74d8335a082b77b963c30dd0d163f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d47e5bd8508926df02a86bbcde245328816417554bf71c70773f476cc366344
+size 1166
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/input_4.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7238eec4d5c7f590935f3ce5c5874022d0ecd61e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b48ecb41bd80052eb1ed78aaeaed699105b0e45ac9e39c906fc818446e630b55
+size 2327
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/input_5.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/input_5.pb
new file mode 100644
index 0000000000000000000000000000000000000000..50658387f7577b3b3505db25ee27f5fbad0407e2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/input_5.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04cba949f0303a74abad7f5f2ce8cd1ec58d9fb43afdfac86cc850002bf19f15
+size 2329
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/output_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/output_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..07194bb36ccdfd95d8a560241770d46bf2b29662
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/output_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a8cc233fa23cc9440c86b6af8ed681597d2659a723897c29b629ee8ba66b929
+size 3484
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/output_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/output_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..09dec32bd0a9255b19fe2f275e30a549ee6846ec
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_3d_with_past_and_present_qk_matmul_softmax_expanded/test_data_set_0/output_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca65e2ad9c7b829b8a0888d2b6934f557fbb8e5a1a8701d6246a3e83a751597e
+size 1759
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..7f4613f41115ab042439cd2ed89af3fa61f5eaf4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75fbbb15136542d1c8fb9311fce771bba1ef6c86454a6061d7c1aa288d275252
+size 185
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d1ff87a3c2bfe9c75797cabf53adb19f935280c5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e77dae1e627eefba58d5815c134f4f0fc05c204e4ae6554f04d7326f03d4af8
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..26edb7fd46e5ba7cbb8d8ffcf5abfd3dd97f6aa8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e941c6d267c918dadb7a5c6d12d1242753927cd2e96578210b0f890a1fdd2a1
+size 235
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..34b9c343a34cbc6ed0fe6e367544de5518da95c0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae8679f7e94288ffaae1d14f9ba4b75b96c6ae432ac6df3efc6821b3d062c3a0
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e8075977e8115d2ea4f312f657275da7c0a9f799
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:292e8058e6de4ba5aed2fb6fdf71ab9c86482c8ac8eaed6f0e97f782d1e0775c
+size 115
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..185acc711b1461bdb38da9e71c54ac4f6d3cabcd
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82884e28dd942856879301e0d1a9d5d5929d8003d13133774315467e8b53162a
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..ec13f04183a68ee7b3244d89df9ad0083048392d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0526d9fb286861d444331341942e2805f3b67bb7279407a877f60944cda7bab
+size 246
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..34b9c343a34cbc6ed0fe6e367544de5518da95c0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae8679f7e94288ffaae1d14f9ba4b75b96c6ae432ac6df3efc6821b3d062c3a0
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..50770e978d87c3d1bce2fbdb246727ee3e737302
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ee926919c9177e9bc867cb4e3df9d82f25d9af2b5537e7fb01b44af5d2e3f65
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..c7625159dfe63d8f4917a0829d7f568c763bd494
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9769ec79d93b915326b97334abbb22418eaf50b10475ea46d82839be6861eb2
+size 271
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..34b9c343a34cbc6ed0fe6e367544de5518da95c0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae8679f7e94288ffaae1d14f9ba4b75b96c6ae432ac6df3efc6821b3d062c3a0
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d8fa9592e5e595da6526286c3f7f3e39dd261921
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8790ed53b1403d9ab260da2bf6734ac5960d697ecb95f6c5fd3d06e51e0e5b4
+size 216
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal_expanded/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal_expanded/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d8fa9592e5e595da6526286c3f7f3e39dd261921
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_causal_expanded/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8790ed53b1403d9ab260da2bf6734ac5960d697ecb95f6c5fd3d06e51e0e5b4
+size 216
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..80a621b790e0d70dbd38e91da027a5702fb8b413
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d092bb8ae177ba65f1462e2bc8815362ed00be14782860ef58c3314768257b1
+size 10278
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..34b9c343a34cbc6ed0fe6e367544de5518da95c0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae8679f7e94288ffaae1d14f9ba4b75b96c6ae432ac6df3efc6821b3d062c3a0
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..50770e978d87c3d1bce2fbdb246727ee3e737302
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_3d_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ee926919c9177e9bc867cb4e3df9d82f25d9af2b5537e7fb01b44af5d2e3f65
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0228e690b1b659df28b692470202925bfd51b3af
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c00a7db4b8d86efe8787babb272db317e42bbee6a7d56dfb77b86bf576391b19
+size 600
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7136a827298afd9dd2055931660c61454eb5a6ec
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4c86cbdde7e70a7b66abebe2b3ff15c3e3b7819ea2a2434f882bce27c5916b1
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..34b9c343a34cbc6ed0fe6e367544de5518da95c0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae8679f7e94288ffaae1d14f9ba4b75b96c6ae432ac6df3efc6821b3d062c3a0
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..a15c7e833f0c3f2ebd3c7cf6f83ddf43ae5f8cd8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08dc97edde7a37efab67833cf59df263e0db146fa7b68e9d4941f479e71b0f85
+size 11995
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..34b9c343a34cbc6ed0fe6e367544de5518da95c0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae8679f7e94288ffaae1d14f9ba4b75b96c6ae432ac6df3efc6821b3d062c3a0
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0228e690b1b659df28b692470202925bfd51b3af
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c00a7db4b8d86efe8787babb272db317e42bbee6a7d56dfb77b86bf576391b19
+size 600
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..b3ca53e6a7dff4d85b5d9c6f4fd282cb39bcbedf
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_causal_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38ef527cc741bf0bf8709a8cbb53f00a458e3be4e4abb724397bc48697263575
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..34b9c343a34cbc6ed0fe6e367544de5518da95c0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae8679f7e94288ffaae1d14f9ba4b75b96c6ae432ac6df3efc6821b3d062c3a0
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_expanded/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_expanded/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0228e690b1b659df28b692470202925bfd51b3af
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_expanded/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c00a7db4b8d86efe8787babb272db317e42bbee6a7d56dfb77b86bf576391b19
+size 600
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7136a827298afd9dd2055931660c61454eb5a6ec
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_4d_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4c86cbdde7e70a7b66abebe2b3ff15c3e3b7819ea2a2434f882bce27c5916b1
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..c24e2a12c57fada57950c7fcd95e5ab2bc1f4a02
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09c99ce3aa93fb11af7430007b3adc25638be425db6f6184e4ee227ffc24cc26
+size 240
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d1ff87a3c2bfe9c75797cabf53adb19f935280c5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e77dae1e627eefba58d5815c134f4f0fc05c204e4ae6554f04d7326f03d4af8
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..dfd45b9141ccab40c0fec8b96b2e18254a774336
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b711071e022be553f30cabf2212725e50488f0371ba22a4b575f0a274cec59c
+size 251
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..34b9c343a34cbc6ed0fe6e367544de5518da95c0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae8679f7e94288ffaae1d14f9ba4b75b96c6ae432ac6df3efc6821b3d062c3a0
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..ee4249ef21a1cd72c6244e4326d1ec5f2cac803f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24a5f5653c97061ab2b0d5c7f27b52fa66e8fb6ed8a88fd26593d08e8ab03a51
+size 168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d1ff87a3c2bfe9c75797cabf53adb19f935280c5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e77dae1e627eefba58d5815c134f4f0fc05c204e4ae6554f04d7326f03d4af8
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d1ff87a3c2bfe9c75797cabf53adb19f935280c5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_4d_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e77dae1e627eefba58d5815c134f4f0fc05c204e4ae6554f04d7326f03d4af8
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..34b9c343a34cbc6ed0fe6e367544de5518da95c0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_bool_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae8679f7e94288ffaae1d14f9ba4b75b96c6ae432ac6df3efc6821b3d062c3a0
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..34b9c343a34cbc6ed0fe6e367544de5518da95c0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae8679f7e94288ffaae1d14f9ba4b75b96c6ae432ac6df3efc6821b3d062c3a0
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_expanded/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_expanded/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e8075977e8115d2ea4f312f657275da7c0a9f799
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_expanded/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:292e8058e6de4ba5aed2fb6fdf71ab9c86482c8ac8eaed6f0e97f782d1e0775c
+size 115
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..185acc711b1461bdb38da9e71c54ac4f6d3cabcd
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_attn_mask_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82884e28dd942856879301e0d1a9d5d5929d8003d13133774315467e8b53162a
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..a49cbae7ac600cdf884889a9c07e46f83a4b738f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46f99869aa859aff239dd279d62905efa2f9d8a4e52456769e210fea79bc044d
+size 210
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..34b9c343a34cbc6ed0fe6e367544de5518da95c0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae8679f7e94288ffaae1d14f9ba4b75b96c6ae432ac6df3efc6821b3d062c3a0
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..40d80e55c46fbcf3d23ec2a1eb74982e642c88e2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f21d428aef9e36a9c405c8b7b60f959d69490a1bd225522a5c91815b70fe399e
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..34b9c343a34cbc6ed0fe6e367544de5518da95c0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae8679f7e94288ffaae1d14f9ba4b75b96c6ae432ac6df3efc6821b3d062c3a0
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..40d80e55c46fbcf3d23ec2a1eb74982e642c88e2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_causal_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f21d428aef9e36a9c405c8b7b60f959d69490a1bd225522a5c91815b70fe399e
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2db44f1b22f71bb6559e6e0a94d246c33e25e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a435d983754dceaf1620c764e32ad225ca7ab672ae1962241485dc934b82c
+size 1456
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv/test_data_set_0/input_4.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8b4360fb33fe6822799d0f090a121ec46abd8633
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27fbc15b0a8f5d0ffab34b2c9cd9bb96a8cc90b7a19013a9e9b749fd17becf1a
+size 40
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..ef942ab6375f720311a94acd8fcbc7278caa1594
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9097df3e39352a87ccba40fb8e62aa3bb2631b10a78fc27b202e6c7b68694cf
+size 14557
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2db44f1b22f71bb6559e6e0a94d246c33e25e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a435d983754dceaf1620c764e32ad225ca7ab672ae1962241485dc934b82c
+size 1456
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv_expanded/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv_expanded/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..52ac79503d336e1c3f1d00156e3186c02d285a08
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv_expanded/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcc9b68abefbb1fe8a564824c4e47501902f6946a0b53ae73fc3542e00b84040
+size 408
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..b9ac60e8f9b9ce33ed432622d3b1322e18a23b54
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_mask4d_padded_kv_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:112083944dd00760887067824002bca26bb274ff127721d9349943d415943447
+size 976
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2db44f1b22f71bb6559e6e0a94d246c33e25e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a435d983754dceaf1620c764e32ad225ca7ab672ae1962241485dc934b82c
+size 1456
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e481875a0c93071387db2136866056cee7b3c0f7
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be660164b657bab7b0055776e8293d64c0036d14d51fa98b2bcc6cdff48cfd91
+size 976
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2db44f1b22f71bb6559e6e0a94d246c33e25e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a435d983754dceaf1620c764e32ad225ca7ab672ae1962241485dc934b82c
+size 1456
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..ad1e12f291d32592572b8b0c4790cde2c45cac6f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2f09f9c90356f2778d05cf61a24177d19f7586e3e756bf8deb40fb34779749f
+size 115
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..098943dd03185622dad88fed91c5604bf06966c1
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69d01e4c1346b577966b8b6d5cabaea1d58a0d38c77cce423b0be38a2354403e
+size 976
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..0758ca3e12a8574654fb91b9a132764e2ca348e9
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abbc08a873a1002d311ff5af074eb141abb556260f0f3087c6c0b37912f7253e
+size 12008
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2db44f1b22f71bb6559e6e0a94d246c33e25e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a435d983754dceaf1620c764e32ad225ca7ab672ae1962241485dc934b82c
+size 1456
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..ad1e12f291d32592572b8b0c4790cde2c45cac6f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2f09f9c90356f2778d05cf61a24177d19f7586e3e756bf8deb40fb34779749f
+size 115
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..098943dd03185622dad88fed91c5604bf06966c1
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_attn_mask_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69d01e4c1346b577966b8b6d5cabaea1d58a0d38c77cce423b0be38a2354403e
+size 976
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..5cbfeebe69aae49310e09954430c4417d2ff8bee
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61f68d1362df6c096d8382cc527e1da1ed92e9618f55afe61ef8d7b2369ceda1
+size 227
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2db44f1b22f71bb6559e6e0a94d246c33e25e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a435d983754dceaf1620c764e32ad225ca7ab672ae1962241485dc934b82c
+size 1456
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..53fb2820dc2d93d9eafc47c2fe8a7b3f8cef4466
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6bf890343c22ad298c167ba6f1f850156e5979ad8dd07bbba14c935a2ab1cbb3
+size 976
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2db44f1b22f71bb6559e6e0a94d246c33e25e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a435d983754dceaf1620c764e32ad225ca7ab672ae1962241485dc934b82c
+size 1456
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..53fb2820dc2d93d9eafc47c2fe8a7b3f8cef4466
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_causal_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6bf890343c22ad298c167ba6f1f850156e5979ad8dd07bbba14c935a2ab1cbb3
+size 976
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..5e516a6b789ce889679492cbf2e003f6b1ccc615
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a35670c82dd2074a43084880894a26d3e271198b3790579927fc6bd43be7f8f
+size 9565
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2db44f1b22f71bb6559e6e0a94d246c33e25e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a435d983754dceaf1620c764e32ad225ca7ab672ae1962241485dc934b82c
+size 1456
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..e481875a0c93071387db2136866056cee7b3c0f7
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be660164b657bab7b0055776e8293d64c0036d14d51fa98b2bcc6cdff48cfd91
+size 976
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..bfd605b69cf457c49645158ade1e21876744d828
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2766bd944c9b0847c00d843bb272fcb8a10d91985b72f9224cb5e7b567b6d9b7
+size 226
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2db44f1b22f71bb6559e6e0a94d246c33e25e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a435d983754dceaf1620c764e32ad225ca7ab672ae1962241485dc934b82c
+size 1456
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7d3cf9c0312fc67cf9cde0e0c4d7851c2115e801
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79f555139b026770e23c8ffdfd0885f83d411fed60aee8ecac51e31fbd475135
+size 976
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..62232f88ab7b217a0013546b5ce9cc8c3d3b91f9
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02e93ca5325f9dd184e2c21bdd3d453ecbe85fa4da40ea265aea37f4c2be0f4c
+size 10326
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2db44f1b22f71bb6559e6e0a94d246c33e25e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a435d983754dceaf1620c764e32ad225ca7ab672ae1962241485dc934b82c
+size 1456
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7d3cf9c0312fc67cf9cde0e0c4d7851c2115e801
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_scaled_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79f555139b026770e23c8ffdfd0885f83d411fed60aee8ecac51e31fbd475135
+size 976
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2db44f1b22f71bb6559e6e0a94d246c33e25e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a435d983754dceaf1620c764e32ad225ca7ab672ae1962241485dc934b82c
+size 1456
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..22edfc7a71fbb5833767350094507c0e9477077d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef32a7434dc2f53423c84dcca3f9f1c03b5152b4024876b0e66c077e25550563
+size 976
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2db44f1b22f71bb6559e6e0a94d246c33e25e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a435d983754dceaf1620c764e32ad225ca7ab672ae1962241485dc934b82c
+size 1456
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..22edfc7a71fbb5833767350094507c0e9477077d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_sizes_softcap_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef32a7434dc2f53423c84dcca3f9f1c03b5152b4024876b0e66c077e25550563
+size 976
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2db44f1b22f71bb6559e6e0a94d246c33e25e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a435d983754dceaf1620c764e32ad225ca7ab672ae1962241485dc934b82c
+size 1456
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8e15e614f96447933eb363a163bc721afafa848f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fc90b9c0ae38a94a9de91a825b035664e41929cb06724d9fc90264bac48155c
+size 308
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/input_4.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..ef2f1f18f8dd414a894a6cb6c2f0063d2545030e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8c94f5ac40f39f0bee0bdc8fe4e8d4d977d7e987d1f5505d189971214ed66b2
+size 2327
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/input_5.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/input_5.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0d7622f6b7b0470fe94eec0688eeae7c5c047029
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/input_5.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26d3bf33cccd19c2153a3c0a31298eaaa391fe429933eb2067f560ebc2ad8896
+size 2905
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2bcbf0a499ad5d636505d0893696d2faa5ba9c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c936936d4059a13d2c7ad7c701dd92f4325c221da4d57fc71408c3cfd8873c5
+size 976
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/output_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/output_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..36889bc073500363c1e0e8b8175d3e33ebbf91a7
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present/test_data_set_0/output_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8eb9ff0334537d01e0baf21f110375a94c5303446a254771bc28221800947f1a
+size 3482
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8e15e614f96447933eb363a163bc721afafa848f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fc90b9c0ae38a94a9de91a825b035664e41929cb06724d9fc90264bac48155c
+size 308
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_5.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_5.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0d7622f6b7b0470fe94eec0688eeae7c5c047029
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_expanded/test_data_set_0/input_5.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26d3bf33cccd19c2153a3c0a31298eaaa391fe429933eb2067f560ebc2ad8896
+size 2905
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2bcbf0a499ad5d636505d0893696d2faa5ba9c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c936936d4059a13d2c7ad7c701dd92f4325c221da4d57fc71408c3cfd8873c5
+size 976
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_expanded/test_data_set_0/output_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_expanded/test_data_set_0/output_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..36889bc073500363c1e0e8b8175d3e33ebbf91a7
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_expanded/test_data_set_0/output_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8eb9ff0334537d01e0baf21f110375a94c5303446a254771bc28221800947f1a
+size 3482
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2db44f1b22f71bb6559e6e0a94d246c33e25e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a435d983754dceaf1620c764e32ad225ca7ab672ae1962241485dc934b82c
+size 1456
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d/test_data_set_0/input_4.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a3e738434e065c590191a8039fbd701913f848f1
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35cd6912e38c1e19a590551a400080458d1fb49a905b8333006b1709fdc70ae8
+size 2327
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d/test_data_set_0/output_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d/test_data_set_0/output_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..089fcce6b6e3d21c580675442acd6edec1c3ff8b
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d/test_data_set_0/output_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:586c3217623fa53504598349a380d7071c448bd61c5c097168046d6c7c1fa8d9
+size 3482
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2db44f1b22f71bb6559e6e0a94d246c33e25e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a435d983754dceaf1620c764e32ad225ca7ab672ae1962241485dc934b82c
+size 1456
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d_expanded/test_data_set_0/input_4.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d_expanded/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..a3e738434e065c590191a8039fbd701913f848f1
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask3d_expanded/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35cd6912e38c1e19a590551a400080458d1fb49a905b8333006b1709fdc70ae8
+size 2327
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..a7747c8445b8e07b14761408fcfcb46e4e9b7108
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c74cc8d8425f1a75b769b4de7cb219ded9f224cdcf96dfc68675a8a047509907
+size 477
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2db44f1b22f71bb6559e6e0a94d246c33e25e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a435d983754dceaf1620c764e32ad225ca7ab672ae1962241485dc934b82c
+size 1456
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/input_5.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/input_5.pb
new file mode 100644
index 0000000000000000000000000000000000000000..33005dfdc82b86a878beceaf331ac07b0c48ef5f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/input_5.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5adf15cb03dc57ec365788f1ef1bcd4dda8989ce701819c6da127a0f3114507
+size 2905
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f2e8963ce70f62a2de5f186f57c05aeaa4f9bb76
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da9db6ad22b7afc8ba41c2b5eee1febe9a066af311073e4b637a31df328ca933
+size 976
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/output_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/output_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..187cab566fe4248bc731096810c707bcc9202827
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/output_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfe2de8dd9662a85efbbd2e7c59d68e2fbe75a55ae0a7ecb78b063a4c52ed8da
+size 3482
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/output_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/output_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3cc031f97099262af1e2fbb28d9302399c7c8fc9
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d/test_data_set_0/output_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:342e676327955eb779d24fc31bf7f9d63d1ddb7cda3c050af69c13c116805100
+size 4348
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..decb5b03ba64afbdda356b18ec0799a6e3f3e450
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46e9ba0df04694d1c2e38ad8f2013a74df8bfb131af147213f7915659ca94b8a
+size 14085
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..0e2db44f1b22f71bb6559e6e0a94d246c33e25e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a435d983754dceaf1620c764e32ad225ca7ab672ae1962241485dc934b82c
+size 1456
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/input_5.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/input_5.pb
new file mode 100644
index 0000000000000000000000000000000000000000..33005dfdc82b86a878beceaf331ac07b0c48ef5f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/input_5.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5adf15cb03dc57ec365788f1ef1bcd4dda8989ce701819c6da127a0f3114507
+size 2905
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f2e8963ce70f62a2de5f186f57c05aeaa4f9bb76
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da9db6ad22b7afc8ba41c2b5eee1febe9a066af311073e4b637a31df328ca933
+size 976
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/output_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/output_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..187cab566fe4248bc731096810c707bcc9202827
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/output_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfe2de8dd9662a85efbbd2e7c59d68e2fbe75a55ae0a7ecb78b063a4c52ed8da
+size 3482
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/output_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/output_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..3cc031f97099262af1e2fbb28d9302399c7c8fc9
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_diff_heads_with_past_and_present_mask4d_expanded/test_data_set_0/output_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:342e676327955eb779d24fc31bf7f9d63d1ddb7cda3c050af69c13c116805100
+size 4348
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..c84a4b600b38eb3de9b15f270a95d3ae9e0b71db
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06825117cff333c5d77a4a6831f874740166269d11f7768a3938881739f480a9
+size 7691
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d1ff87a3c2bfe9c75797cabf53adb19f935280c5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e77dae1e627eefba58d5815c134f4f0fc05c204e4ae6554f04d7326f03d4af8
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_fp16/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_fp16/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1369a338f545c7264baf8d4673f8ae7a8809fc48
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_fp16/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51622d7f23759bffc9ec9cc13eedc3669a09105939d62b98070016114a498f49
+size 592
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_fp16_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_fp16_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..229996d2a5c8f86fffaf30d61899a1640a7d2868
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_fp16_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a3f82639c002f8af8bc21d064ae098073341fed227739754a0184a6b838822c
+size 592
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_fp16_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_fp16_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1369a338f545c7264baf8d4673f8ae7a8809fc48
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_fp16_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51622d7f23759bffc9ec9cc13eedc3669a09105939d62b98070016114a498f49
+size 592
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..7a714b462cdada609697b37c8efe1d96a357a4fd
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91138361a26e40c0acd63631e3c6cbbf7e0d45af5332e1d8a9ca28224dab24f9
+size 189
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..db8e851a2bd440d86e787037235ff4608b7be855
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c63d28c0121a7d6ce208716e8705f55a27a5cce34ccbb875256d6ca469ee4d60
+size 2320
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..df9870cc7f300a43f9cfa7aa66251b0bb5ee96a6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:331eb31b1f8f4b0eb87f7de7d9af53b4e1784453249fc2ce4e0deb880ec3a8ae
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..054058b34da16590e88957f43baff866dfc795f2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09a36b0c461524bb5b5e310c7a292492df71f0db22da97255eabcccc7b5e73c9
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..d570fa094d5b3888ec914e5638f9e127f3a33460
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d2778fda7a9225c6bdb5e005dd0fdae248fb725bdc5763b77d81682d9557b3b
+size 239
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..db8e851a2bd440d86e787037235ff4608b7be855
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c63d28c0121a7d6ce208716e8705f55a27a5cce34ccbb875256d6ca469ee4d60
+size 2320
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..df9870cc7f300a43f9cfa7aa66251b0bb5ee96a6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:331eb31b1f8f4b0eb87f7de7d9af53b4e1784453249fc2ce4e0deb880ec3a8ae
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..054058b34da16590e88957f43baff866dfc795f2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09a36b0c461524bb5b5e310c7a292492df71f0db22da97255eabcccc7b5e73c9
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..db8e851a2bd440d86e787037235ff4608b7be855
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c63d28c0121a7d6ce208716e8705f55a27a5cce34ccbb875256d6ca469ee4d60
+size 2320
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..df9870cc7f300a43f9cfa7aa66251b0bb5ee96a6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:331eb31b1f8f4b0eb87f7de7d9af53b4e1784453249fc2ce4e0deb880ec3a8ae
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..054058b34da16590e88957f43baff866dfc795f2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09a36b0c461524bb5b5e310c7a292492df71f0db22da97255eabcccc7b5e73c9
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask_expanded/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask_expanded/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..72654b35ac86fb6bf9877c5433d63acf630b20bc
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask_expanded/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b5c825e1b8bd5a85d5bedd9c6a632db3eae3c6ee6f61d7d0b2fab0e43ce06a1
+size 115
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..698a802bb34de31e83de162eb0c2b0d6669047b7
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_attn_mask_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12e52681a78242f1c58f04b2f7987384ebd5524a0e241ecafecc2da9fb2c2a64
+size 2320
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..6cfc00f91a0554fdaf28a7021400008f3f04835a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47a9d0e5f483eeb94ba30cfdc225b2cd33ef59c3d0d186fd1d10e4d054fa7f93
+size 214
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..db8e851a2bd440d86e787037235ff4608b7be855
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c63d28c0121a7d6ce208716e8705f55a27a5cce34ccbb875256d6ca469ee4d60
+size 2320
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..df9870cc7f300a43f9cfa7aa66251b0bb5ee96a6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:331eb31b1f8f4b0eb87f7de7d9af53b4e1784453249fc2ce4e0deb880ec3a8ae
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..4263c7eac70cb9dbb3b506a77264731c2e20b696
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52a7bbae0beee1db29ef1709b14b100cd33b44da0cde91a1116b10fc2b3412e5
+size 2320
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..24c358860550292f4a2fd219a43535a36b616e51
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cbb9b705242e3985dce88ddc5d10f13a0645e49b8b0bd16523578bc62f9d2c06
+size 9662
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..db8e851a2bd440d86e787037235ff4608b7be855
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c63d28c0121a7d6ce208716e8705f55a27a5cce34ccbb875256d6ca469ee4d60
+size 2320
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..df9870cc7f300a43f9cfa7aa66251b0bb5ee96a6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:331eb31b1f8f4b0eb87f7de7d9af53b4e1784453249fc2ce4e0deb880ec3a8ae
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..054058b34da16590e88957f43baff866dfc795f2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_causal_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09a36b0c461524bb5b5e310c7a292492df71f0db22da97255eabcccc7b5e73c9
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..054058b34da16590e88957f43baff866dfc795f2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09a36b0c461524bb5b5e310c7a292492df71f0db22da97255eabcccc7b5e73c9
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..77388c334efd631ee5f6b0554d6afaafd66173a7
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85e3bf7ccfd4cc3fa16fc8e5159334cff68d653ab7f3dccc6b04f1e3d698af78
+size 2320
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..f36750cdac7d042e184bd4347123bcac7f4db0e2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:387f5e8407b6f6a5d5d953cf29b2b7e48cfa9d066aba0c7e4ec764dc38a0071a
+size 216
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..db8e851a2bd440d86e787037235ff4608b7be855
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c63d28c0121a7d6ce208716e8705f55a27a5cce34ccbb875256d6ca469ee4d60
+size 2320
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..df9870cc7f300a43f9cfa7aa66251b0bb5ee96a6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:331eb31b1f8f4b0eb87f7de7d9af53b4e1784453249fc2ce4e0deb880ec3a8ae
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..054058b34da16590e88957f43baff866dfc795f2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09a36b0c461524bb5b5e310c7a292492df71f0db22da97255eabcccc7b5e73c9
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..72460f42865d48cf0db86374f212b0f0294cba2f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cbfaa8ce14993473c72fc954c4bc06a28a209534b26c76291b46b2e7c395ef2a
+size 2320
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..db8e851a2bd440d86e787037235ff4608b7be855
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c63d28c0121a7d6ce208716e8705f55a27a5cce34ccbb875256d6ca469ee4d60
+size 2320
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..df9870cc7f300a43f9cfa7aa66251b0bb5ee96a6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:331eb31b1f8f4b0eb87f7de7d9af53b4e1784453249fc2ce4e0deb880ec3a8ae
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..054058b34da16590e88957f43baff866dfc795f2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09a36b0c461524bb5b5e310c7a292492df71f0db22da97255eabcccc7b5e73c9
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..72460f42865d48cf0db86374f212b0f0294cba2f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_softcap_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cbfaa8ce14993473c72fc954c4bc06a28a209534b26c76291b46b2e7c395ef2a
+size 2320
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..db8e851a2bd440d86e787037235ff4608b7be855
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c63d28c0121a7d6ce208716e8705f55a27a5cce34ccbb875256d6ca469ee4d60
+size 2320
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..df9870cc7f300a43f9cfa7aa66251b0bb5ee96a6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:331eb31b1f8f4b0eb87f7de7d9af53b4e1784453249fc2ce4e0deb880ec3a8ae
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f3b6939ce90b0df2701b6afad3942cbebf311aa5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fec3de4b82ef735a98f0caea56a7ab35f0f3fc522e30b78e58c3ae95144fcace
+size 308
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present/test_data_set_0/input_5.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present/test_data_set_0/input_5.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d45fbd26835b5ebf9c6a057d6e5eece89844ace2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present/test_data_set_0/input_5.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1887d42edf46db6b3611c2bc1e4c321bad2bcc28c69f00c56cdaa7181c74c35
+size 2329
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..212abed803c20095628d62dd42898ad8aaae4dba
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5de35b5677c0a0e3484db82f55a2ab582e514708e90c036a576af2ddf23276f
+size 2320
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..5f81151acaec83cdf284d0bfc1a992d416c97987
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ac5cf1f830763888a690909f884a8492a94012fed98048614832bffd1045f4e
+size 12308
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..df9870cc7f300a43f9cfa7aa66251b0bb5ee96a6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:331eb31b1f8f4b0eb87f7de7d9af53b4e1784453249fc2ce4e0deb880ec3a8ae
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_expanded/test_data_set_0/input_4.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_expanded/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..cf4e1cbafac7fb726a4f00a28a7b1daa93eae6a4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_expanded/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ca216a305a972f8e3a6ba1651b0e648b0c7e69561d2a84043a880350cfcf8fe
+size 2327
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_fp16/test_data_set_0/input_4.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_fp16/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..26d54bd4516d02ec652578ad85f88723c6b0df43
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_fp16/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1b6639e6ebb173ce9905fd35748ab532833fcefd3b03aa849a83fd2ccdf3172
+size 1175
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_fp16/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_fp16/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..dabbb8de45e1d7353d93b08266613b2746ceb4e8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_fp16/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4181fde65cefc4a4784a9cd6183d27af475ae96b9e057f8b9f0e5e969684833
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_fp16/test_data_set_0/output_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_fp16/test_data_set_0/output_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d50d8e014f94261a8ca9b37fc5035bd05f3db387
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_fp16/test_data_set_0/output_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2371206b6d905ed3e7376c4c88d6bf049e0177a88b3e9772942c3e2532c11eb3
+size 1756
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_fp16_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_fp16_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..318fda57f03a51181ef913b53c32f37233bb1728
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_fp16_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f963c362a982ba758967306666d387cc92e6efad1a2384f11442d290c3066704
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_fp16_expanded/test_data_set_0/output_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_fp16_expanded/test_data_set_0/output_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..d50d8e014f94261a8ca9b37fc5035bd05f3db387
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_gqa_with_past_and_present_fp16_expanded/test_data_set_0/output_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2371206b6d905ed3e7376c4c88d6bf049e0177a88b3e9772942c3e2532c11eb3
+size 1756
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..74f7ba98ba917870d2373c22251030d19865745b
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9dd7716d91171bdedfa2f69922d402d5b44662ca5bbe2c653851fb8859d8459
+size 209
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled_expanded/model.onnx b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled_expanded/model.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..d03d4fd6dbad0186aa3b2caf0e27a3c33bed6a69
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled_expanded/model.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26615e290285572e23c81c2e94e52e470d741a56ecbafe40d4b40b68a9b209c7
+size 8455
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled_expanded/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled_expanded/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled_expanded/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..08eb0439ed03ac3d932f19e0e0706c3d8b6a8fd3
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_scaled_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e33fbcfbb4926f13c6ea2c1da0868731dc723039cae6d2826c182c42ac2343d
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..34b9c343a34cbc6ed0fe6e367544de5518da95c0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae8679f7e94288ffaae1d14f9ba4b75b96c6ae432ac6df3efc6821b3d062c3a0
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..fa124e4d135380e03969a129cc6024a74f46fb33
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:880ce7c81864356bd95a80d6e606496f5fae6abadbebb5ae3543ffd5e80c6b49
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap_expanded/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap_expanded/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap_expanded/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap_expanded/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap_expanded/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..34b9c343a34cbc6ed0fe6e367544de5518da95c0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap_expanded/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae8679f7e94288ffaae1d14f9ba4b75b96c6ae432ac6df3efc6821b3d062c3a0
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap_expanded/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap_expanded/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..fa124e4d135380e03969a129cc6024a74f46fb33
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_softcap_expanded/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:880ce7c81864356bd95a80d6e606496f5fae6abadbebb5ae3543ffd5e80c6b49
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2d58d20321690e2c8a10890ba1f23dee7cd7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23be5a166d5ed4fd58c6f6797c0d6636fe87870fa05200e5fbc46ed953dc97b5
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_1.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_1.pb
new file mode 100644
index 0000000000000000000000000000000000000000..2c694270ad69f916f7a7c0032e9e66e77198148a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_1.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53e2c38d76bb9660df12fd843188307abd6ab44d01468a3c005844fbf3894f4c
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..34b9c343a34cbc6ed0fe6e367544de5518da95c0
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae8679f7e94288ffaae1d14f9ba4b75b96c6ae432ac6df3efc6821b3d062c3a0
+size 1168
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_3.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_3.pb
new file mode 100644
index 0000000000000000000000000000000000000000..1cd277d8f36c46ba0c7135ad54ef44a20833376a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_3.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fc6d28acaef37bb9f455b6320183cd10b3bd07ac4b7298d265a5cf1fa818e0a
+size 308
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_4.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_4.pb
new file mode 100644
index 0000000000000000000000000000000000000000..7238eec4d5c7f590935f3ce5c5874022d0ecd61e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_4.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b48ecb41bd80052eb1ed78aaeaed699105b0e45ac9e39c906fc818446e630b55
+size 2327
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_5.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_5.pb
new file mode 100644
index 0000000000000000000000000000000000000000..50658387f7577b3b3505db25ee27f5fbad0407e2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/input_5.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04cba949f0303a74abad7f5f2ce8cd1ec58d9fb43afdfac86cc850002bf19f15
+size 2329
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/output_0.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/output_0.pb
new file mode 100644
index 0000000000000000000000000000000000000000..726d5feeb0ce6df8a841f399246dd3a93b55174e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/output_0.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf2036a5e175e4079341ee7312905f4d4556667ab9ffb121b05ab2912a86c3d9
+size 784
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/output_2.pb b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/output_2.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8d8a4c71b916c550e508be968f0561ce3d7df195
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/data/node/test_attention_4d_with_past_and_present/test_data_set_0/output_2.pb
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46810688540fc5c0dd2a5eee0cfbde23d71dac261466e16c09a073b5cd2fda5f
+size 3484
diff --git a/pythonProject/.venv/Lib/site-packages/onnx/backend/test/stat_coverage.py b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/stat_coverage.py
new file mode 100644
index 0000000000000000000000000000000000000000..653ed1d2ddc2685ef6c2b44835d60c763da2da3e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/onnx/backend/test/stat_coverage.py
@@ -0,0 +1,295 @@
+#!/usr/bin/env python
+
+# Copyright (c) ONNX Project Contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import os
+from typing import IO, TYPE_CHECKING, Any
+
+from onnx import AttributeProto, defs, load
+from onnx.backend.test.case import collect_snippets
+from onnx.backend.test.loader import load_model_tests
+from onnx.backend.test.runner import Runner
+
+if TYPE_CHECKING:
+    from collections.abc import Sequence
+
+
+def is_ml(schemas: Sequence[defs.OpSchema]) -> bool:
+    return any(s.domain == "ai.onnx.ml" for s in schemas)
+
+
+def gen_outlines(f: IO[Any], ml: bool) -> None:
+    f.write("# Test Coverage Report")
+    if ml:
+        f.write(" (ONNX-ML Operators)\n")
+    else:
+        f.write(" (ONNX Core Operators)\n")
+    f.write("## Outlines\n")
+    f.write("* [Node Test Coverage](#node-test-coverage)\n")
+    f.write("* [Model Test Coverage](#model-test-coverage)\n")
+    f.write("* [Overall Test Coverage](#overall-test-coverage)\n")
+
+
+common_covered: Sequence[str] = []
+experimental_covered: Sequence[str] = []
+
+
+def gen_node_test_coverage(
+    schemas: Sequence[defs.OpSchema], f: IO[Any], ml: bool
+) -> None:
+    global common_covered  # noqa: PLW0603
+    global experimental_covered  # noqa: PLW0603
+    generators = set(
+        {
+            "Multinomial",
+            "RandomNormal",
+            "RandomNormalLike",
+            "RandomUniform",
+            "RandomUniformLike",
+        }
+    )
+    node_tests = collect_snippets()
+    common_covered = sorted(
+        s.name
+        for s in schemas
+        if s.name in node_tests
+        and s.support_level == defs.OpSchema.SupportType.COMMON
+        and (s.domain == "ai.onnx.ml") == ml
+    )
+    common_no_cover = sorted(
+        s.name
+        for s in schemas
+        if s.name not in node_tests
+        and s.support_level == defs.OpSchema.SupportType.COMMON
+        and (s.domain == "ai.onnx.ml") == ml
+    )
+    common_generator = sorted(name for name in common_no_cover if name in generators)
+    experimental_covered = sorted(
+        s.name
+        for s in schemas
+        if s.name in node_tests
+        and s.support_level == defs.OpSchema.SupportType.EXPERIMENTAL
+        and (s.domain == "ai.onnx.ml") == ml
+    )
+    experimental_no_cover = sorted(
+        s.name
+        for s in schemas
+        if s.name not in node_tests
+        and s.support_level == defs.OpSchema.SupportType.EXPERIMENTAL
+        and (s.domain == "ai.onnx.ml") == ml
+    )
+    experimental_generator = sorted(
+        name for name in experimental_no_cover if name in generators
+    )
+    num_common = len(common_covered) + len(common_no_cover) - len(common_generator)
+    num_experimental = (
+        len(experimental_covered)
+        + len(experimental_no_cover)
+        - len(experimental_generator)
+    )
+    f.write("# Node Test Coverage\n")
+    f.write("## Summary\n")
+    if num_common:
+        f.write(
+            f"Node tests have covered {len(common_covered)}/{num_common} "
+            f"({len(common_covered) / float(num_common) * 100:.2f}%, {len(common_generator)} "
+            f"generators excluded) common operators.\n\n"
+        )
+    else:
+        f.write("Node tests have covered 0/0 (N/A) common operators. \n\n")
+    if num_experimental:
+        f.write(
+            "Node tests have covered {}/{} ({:.2f}%, {} generators excluded) "  # noqa: UP032
+            "experimental operators.\n\n".format(
+                len(experimental_covered),
+                num_experimental,
+                (len(experimental_covered) / float(num_experimental) * 100),
+                len(experimental_generator),
+            )
+        )
+    else:
+        f.write("Node tests have covered 0/0 (N/A) experimental operators.\n\n")
+    titles = [
+        "💚Covered Common Operators",
+        "💔No Cover Common Operators",
+        "💚Covered Experimental Operators",
+        "💔No Cover Experimental Operators",
+    ]
+    all_lists = [
+        common_covered,
+        common_no_cover,
+        experimental_covered,
+        experimental_no_cover,
+    ]
+    for t in titles:
+        f.write(f"* [{t[9:]}](#{t[9:].lower().replace(' ', '-')})\n")
+    f.write("\n")
+    for t, l in zip(titles, all_lists):  # noqa: E741
+        f.write(f"## {t}\n")
+        for s in l:
+            f.write(f"### {s}")
+            if s in node_tests:
+                f.write(
+                    f"\nThere are {len(node_tests[s])} test cases, listed as following:\n"
+                )
+                for summary, code in sorted(node_tests[s]):
+                    f.write("
\n") + f.write(f"{summary}\n\n") + f.write(f"```python\n{code}\n```\n\n") + f.write("
\n") + else: # noqa: PLR5501 + if s in generators: + f.write(" (random generator operator)\n") + else: + f.write(" (call for test cases)\n") + f.write("\n\n") + f.write("
\n\n") + + +def gen_model_test_coverage( + schemas: Sequence[defs.OpSchema], f: IO[Any], ml: bool +) -> None: + f.write("# Model Test Coverage\n") + # Process schemas + schema_dict = {} + for schema in schemas: + schema_dict[schema.name] = schema + # Load models from each model test using Runner.prepare_model_data + # Need to grab associated nodes + attrs: dict[str, dict[str, list[Any]]] = {} + model_paths: list[Any] = [] + for rt in load_model_tests(kind="real"): + if rt.url.startswith("onnx/backend/test/data/light/"): + # testing local files + model_name = os.path.normpath( + os.path.join(os.path.dirname(__file__), "..", "..", "..", rt.url) + ) + if not os.path.exists(model_name): + raise FileNotFoundError(f"Unable to find model {model_name!r}.") + model_paths.append(model_name) + else: + model_dir = Runner.prepare_model_data(rt) + model_paths.append(os.path.join(model_dir, "model.onnx")) + model_paths.sort() + model_written = False + for model_pb_path in model_paths: + model = load(model_pb_path) + if ml: + ml_present = False + for opset in model.opset_import: + if opset.domain == "ai.onnx.ml": + ml_present = True + if not ml_present: + continue + else: + model_written = True + f.write(f"## {model.graph.name}\n") + # Deconstruct model + num_covered = 0 + for node in model.graph.node: + if node.op_type in common_covered or node.op_type in experimental_covered: + num_covered += 1 + # Add details of which nodes are/aren't covered + # Iterate through and store each node's attributes + for attr in node.attribute: + if node.op_type not in attrs: + attrs[node.op_type] = {} + if attr.name not in attrs[node.op_type]: + attrs[node.op_type][attr.name] = [] + if attr.type == AttributeProto.FLOAT: + if attr.f not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.f) + elif attr.type == AttributeProto.INT: + if attr.i not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.i) + elif attr.type == AttributeProto.STRING: + 
if attr.s not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.s) + elif attr.type == AttributeProto.TENSOR: + if attr.t not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.t) + elif attr.type == AttributeProto.GRAPH: + if attr.g not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.g) + elif attr.type == AttributeProto.FLOATS: + if attr.floats not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.floats) + elif attr.type == AttributeProto.INTS: + if attr.ints not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.ints) + elif attr.type == AttributeProto.STRINGS: + if attr.strings not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.strings) + elif attr.type == AttributeProto.TENSORS: + if attr.tensors not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.tensors) + elif attr.type == AttributeProto.GRAPHS: + if attr.graphs not in attrs[node.op_type][attr.name]: + attrs[node.op_type][attr.name].append(attr.graphs) + f.write( + f"\n{model.graph.name} has {num_covered} nodes. " + f"Of these, {len(model.graph.node)} are covered by node tests " + f"({100.0 * float(num_covered) / float(len(model.graph.node))}%)\n\n\n" + ) + # Iterate through attrs, print + f.write("
\n") + f.write("nodes\n\n") + for op in sorted(attrs): + f.write("
\n") + # Get total number of attributes for node schema + f.write( + f"{op}: {len(attrs[op])} out of {len(schema_dict[op].attributes)} attributes covered\n\n" + ) + for attribute in sorted(schema_dict[op].attributes): + if attribute in attrs[op]: + f.write(f"{attribute}: {len(attrs[op][attribute])}\n") + else: + f.write(f"{attribute}: 0\n") + f.write("
\n") + f.write("
\n\n\n") + if not model_written and ml: + f.write("No model tests present for selected domain\n") + + +def gen_overall_test_coverage( + f: IO[Any], +) -> None: + f.write("# Overall Test Coverage\n") + f.write("## To be filled.\n") + + +def gen_spdx(f: IO[Any]) -> None: + f.write("\n") + + +def main() -> None: + base_dir = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + ) + docs_dir = os.path.join(base_dir, "docs") + schemas = defs.get_all_schemas() + + has_ml = is_ml(schemas) + fname = os.path.join(docs_dir, "TestCoverage.md") + with open(fname, "w+", newline="", encoding="utf-8") as f: # type: ignore + gen_spdx(f) + gen_outlines(f, False) + gen_node_test_coverage(schemas, f, False) + gen_model_test_coverage(schemas, f, False) + gen_overall_test_coverage(f) + + if has_ml: + fname = os.path.join(docs_dir, "TestCoverage-ml.md") + with open(fname, "w+", newline="", encoding="utf-8") as f: # type: ignore + gen_spdx(f) + gen_outlines(f, True) + gen_node_test_coverage(schemas, f, True) + gen_model_test_coverage(schemas, f, True) + gen_overall_test_coverage(f) + + +if __name__ == "__main__": + main() diff --git a/pythonProject/.venv/Lib/site-packages/onnx/checker.cc b/pythonProject/.venv/Lib/site-packages/onnx/checker.cc new file mode 100644 index 0000000000000000000000000000000000000000..80ec57f39c07ea16d41353ae7808afd53553b12c --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/checker.cc @@ -0,0 +1,1057 @@ +// Copyright (c) ONNX Project Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +#include "onnx/checker.h" + +#include +#include +#include +#include + +#include "onnx/common/file_utils.h" +#include "onnx/defs/schema.h" +#include "onnx/defs/tensor_proto_util.h" +#include "onnx/shape_inference/implementation.h" +#include "onnx/string_utils.h" + +#ifdef _WIN32 +#include "onnx/common/path.h" +#endif + +namespace ONNX_NAMESPACE { +namespace checker { + +#define enforce_has_field(proto, field) \ + do { 
\ + if (!proto.has_##field()) { \ + fail_check("Field '", #field, "' of '", #proto, "' is required but missing."); \ + } \ + } while (0) + +#define enforce_non_empty_field(proto, field) \ + do { \ + if (proto.field().empty()) { \ + fail_check("Field '", #field, "' of '", #proto, "' is required to be non-empty."); \ + } \ + } while (0) + +void check_value_info(const ValueInfoProto& value_info, const CheckerContext& ctx) { + enforce_non_empty_field(value_info, name); + // Relax constraint for subgraph input/output. + if (!ctx.is_main_graph()) + return; + enforce_has_field(value_info, type); + const auto value_case = value_info.type().value_case(); + switch (value_case) { + case TypeProto::kTensorType: { + const auto& type = value_info.type().tensor_type(); + enforce_has_field(type, elem_type); + enforce_has_field(type, shape); + } break; + case TypeProto::kOptionalType: { + const auto& type = value_info.type().optional_type(); + enforce_has_field(type, elem_type); + } break; + case TypeProto::kSequenceType: { + const auto& type = value_info.type().sequence_type(); + enforce_has_field(type, elem_type); + } break; + case TypeProto::kMapType: { + const auto& type = value_info.type().map_type(); + enforce_has_field(type, key_type); + enforce_has_field(type, value_type); + } break; +#ifdef ONNX_ML + case TypeProto::kOpaqueType: + break; +#endif + case TypeProto::kSparseTensorType: { + const auto& type = value_info.type().sparse_tensor_type(); + enforce_has_field(type, elem_type); + enforce_has_field(type, shape); + } break; + + default: + fail_check("Unrecognized type value case (value_info name: ", value_info.name(), "): ", value_case); + } +} + +void check_tensor(const TensorProto& tensor, const CheckerContext& ctx) { + enforce_has_field(tensor, data_type); + if (tensor.data_type() == TensorProto::UNDEFINED) { + fail_check("setting data_type field (tensor name: ", tensor.name(), ") to UNDEFINED is not allowed"); + } + + int num_value_fields = 0; + + const char* 
value_field = nullptr; + +#define check_data_field(field) \ + bool has_##field = !tensor.field().empty(); \ + if (has_##field) { \ + ++num_value_fields; \ + value_field = #field; \ + } + + check_data_field(float_data); + check_data_field(int32_data); + check_data_field(string_data); + check_data_field(int64_data); + check_data_field(raw_data); + check_data_field(double_data); + check_data_field(uint64_data); + +#undef check_data_field + + bool stored_externally = tensor.has_data_location() && tensor.data_location() == TensorProto::EXTERNAL; + if (stored_externally) { + if (num_value_fields != 0) { + fail_check( + "Data of TensorProto ( tensor name: ", + tensor.name(), + ") is stored externally and should not have data field.", + value_field); + } + + bool has_location = false; + for (const StringStringEntryProto& entry : tensor.external_data()) { + if (entry.has_key() && entry.has_value() && entry.key() == "location") { + has_location = true; + resolve_external_data_location(ctx.get_model_dir(), entry.value(), tensor.name()); + } + } + if (!has_location) { + fail_check("TensorProto ( tensor name: ", tensor.name(), ") is stored externally but doesn't have a location."); + } + return; + } + int64_t nelem = 1; + for (auto x : tensor.dims()) { + nelem *= x; + } + if (nelem == 0 && num_value_fields != 0) { + fail_check("TensorProto (tensor name: ", tensor.name(), ") is 0-element but contains data!"); + } + if (nelem != 0 && num_value_fields != 1) { + fail_check("TensorProto (tensor name: ", tensor.name(), ") should contain one and only one value field."); + } + if (has_raw_data) { + if (tensor.data_type() == TensorProto::STRING) { + fail_check("STRING data (tensor name: ", tensor.name(), ") should not be stored in raw_data field"); + } + return; + } else { +#define check_field(field) \ + if (nelem != 0 && !has_##field) { \ + fail_check( \ + "values of data_type '", \ + tensor.data_type(), \ + "' should be stored in field '", \ + #field, \ + "' instead of '", \ + 
value_field, \ + "'"); \ + } + + switch (tensor.data_type()) { + case TensorProto::FLOAT: + case TensorProto::COMPLEX64: + check_field(float_data); + break; + + case TensorProto::DOUBLE: + case TensorProto::COMPLEX128: + check_field(double_data); + break; + + case TensorProto::INT32: + case TensorProto::UINT8: + case TensorProto::INT8: + case TensorProto::UINT16: + case TensorProto::INT16: + case TensorProto::BOOL: + case TensorProto::FLOAT16: + case TensorProto::BFLOAT16: + case TensorProto::FLOAT8E4M3FN: + case TensorProto::FLOAT8E4M3FNUZ: + case TensorProto::FLOAT8E5M2: + case TensorProto::FLOAT8E5M2FNUZ: + case TensorProto::FLOAT8E8M0: + case TensorProto::UINT4: + case TensorProto::INT4: + case TensorProto::FLOAT4E2M1: + check_field(int32_data); + break; + + case TensorProto::INT64: + check_field(int64_data); + break; + + case TensorProto::UINT32: + case TensorProto::UINT64: + check_field(uint64_data); + break; + + case TensorProto::STRING: + check_field(string_data); + break; + + default: + fail_check("Unrecognized data_type (tensor name: ", tensor.name(), "): ", tensor.data_type()); + } + } + +#undef check_field +} + +void check_sequence(const SequenceProto& sequence, const CheckerContext& ctx) { + enforce_has_field(sequence, elem_type); + if (sequence.elem_type() == SequenceProto::TENSOR) { + for (const TensorProto& tensor : sequence.tensor_values()) { + check_tensor(tensor, ctx); + } + } else if (sequence.elem_type() == SequenceProto::SPARSE_TENSOR) { + for (const SparseTensorProto& sparse_tensor : sequence.sparse_tensor_values()) { + check_sparse_tensor(sparse_tensor, ctx); + } + } else if (sequence.elem_type() == SequenceProto::SEQUENCE) { + for (const SequenceProto& seq : sequence.sequence_values()) { + check_sequence(seq, ctx); + } + } else if (sequence.elem_type() == SequenceProto::MAP) { + for (const MapProto& map : sequence.map_values()) { + check_map(map, ctx); + } + } else { + fail_check( + "Sequence ( Structure name: ", + sequence.name(), + ", 
elem_type: ", + sequence.elem_type(), + ") is not have a valid element type."); + } +} + +void check_optional(const OptionalProto& optional, const CheckerContext& ctx) { + enforce_has_field(optional, elem_type); + if (optional.elem_type() == OptionalProto::UNDEFINED) { + return; + } else if (optional.elem_type() == OptionalProto::TENSOR) { + if (optional.has_tensor_value()) + check_tensor(optional.tensor_value(), ctx); + } else if (optional.elem_type() == OptionalProto::SPARSE_TENSOR) { + if (optional.has_sparse_tensor_value()) + check_sparse_tensor(optional.sparse_tensor_value(), ctx); + } else if (optional.elem_type() == OptionalProto::SEQUENCE) { + if (optional.has_sequence_value()) + check_sequence(optional.sequence_value(), ctx); + } else if (optional.elem_type() == OptionalProto::MAP) { + if (optional.has_map_value()) + check_map(optional.map_value(), ctx); + } else { + fail_check( + "Optional ( Structure name: ", + optional.name(), + ", elem_type: ", + optional.elem_type(), + ") is not have a valid element type."); + } +} + +void check_map(const MapProto& map, const CheckerContext& ctx) { + enforce_has_field(map, key_type); + if (map.key_type() == TensorProto::UNDEFINED) { + fail_check("setting key_type field (map name: ", map.name(), ") to UNDEFINED is not allowed"); + } + // Check if key is a valid type, specifically INT8, INT16, INT32, INT64, + // UINT8, UINT16, UINT32, UINT64, or STRING. + if ((map.key_type() == TensorProto::FLOAT) || (map.key_type() == TensorProto::BOOL) || + (map.key_type() == TensorProto::FLOAT16) || (map.key_type() == TensorProto::COMPLEX64) || + (map.key_type() == TensorProto::COMPLEX128)) { + fail_check( + "setting key_type field (map name: ", + map.name(), + ") to invalid TensorProto key_type ", + map.key_type(), + " is not allowed"); + } + + // MapProto will use either keys or string_keys, so only one should be > 0. 
+ if ((map.keys_size() > 0) && (map.string_keys_size() > 0)) { + fail_check("Map (name: ", map.name(), ") should not contain more than one keys field."); + } + + int num_keys = map.keys_size() + map.string_keys_size(); + int num_values = 0; + + enforce_has_field(map, values); + check_sequence(map.values(), ctx); + + if (map.values().elem_type() == SequenceProto::TENSOR) { + num_values = map.values().tensor_values_size(); + } else if (map.values().elem_type() == SequenceProto::SPARSE_TENSOR) { + num_values = map.values().sparse_tensor_values_size(); + } else if (map.values().elem_type() == SequenceProto::SEQUENCE) { + num_values = map.values().sequence_values_size(); + } else if (map.values().elem_type() == SequenceProto::MAP) { + num_values = map.values().map_values_size(); + } + + if (num_keys != num_values) { + fail_check("Length of map keys and map values are not the same (map name: ", map.name(), ")"); + } +} + +// Check that the index data stored in a SparseTensorProto is valid. +// indices: a 1-dimensional tensor; indices[i] represents the +// linearized index value for the i-th nonzero value. +static void +check_sparse_tensor_indices_1(const TensorProto& indices, const SparseTensorProto& sparse_tensor_proto, size_t nnz) { + int dense_rank = sparse_tensor_proto.dims_size(); + int64_t dense_size = 1; + for (int i = 0; i < dense_rank; ++i) + dense_size *= sparse_tensor_proto.dims(i); + if (static_cast(indices.dims(0)) != nnz) { + fail_check("Sparse tensor indices (", indices.name(), ") has ", indices.dims(0), " values, but NNZ is ", nnz); + } + + // Check if indices appear in ascending order, and if they have valid + // values. The i-th value in index_data is the linear index of the i-th + // non-zero value. 
+ const std::vector index_data = ParseData(&indices); + + int64_t prev_index = -1; + for (size_t i = 0; i < nnz; ++i) { + int64_t curr_index = index_data[i]; // linearized index of i-th value + if (curr_index < 0 || curr_index >= dense_size) { + fail_check( + "Sparse tensor (", + indices.name(), + ") index value at position [", + i, + "] out of range [0, ", + dense_size - 1, + "]"); + } + if (curr_index <= prev_index) { + fail_check("Sparse tensor (", indices.name(), ") index value at position [", i, "] not in sorted order."); + } + prev_index = curr_index; + } +} + +// Check that the index data stored in a SparseTensorProto is valid. +// indices: a 2-dimensional tensor; indices[i,j] represents the j-th +// index value for the i-th nonzero value. +static void +check_sparse_tensor_indices_2(const TensorProto& indices, const SparseTensorProto& sparse_tensor_proto, size_t nnz) { + int dense_rank = sparse_tensor_proto.dims_size(); + if (static_cast(indices.dims(0)) != nnz) { + fail_check("Sparse tensor indices (", indices.name(), ") first dimension size does not equal NNZ."); + } + if (indices.dims(1) != dense_rank) { + fail_check("Sparse tensor indices (", indices.name(), ") second dimension size does not match rank of tensor."); + } + + // Check if indices appear in ascending order, and if they have valid + // values. 
+ const std::vector index_data = ParseData(&indices); + int64_t prev_index = -1; + for (size_t i = 0; i < nnz; ++i) { + int64_t curr_index = 0; // linearized index of i-th value + for (int j = 0; j < dense_rank; ++j) { + auto index_ij = index_data[i * dense_rank + j]; + if ((index_ij < 0) || (index_ij >= sparse_tensor_proto.dims(j))) { + fail_check("Sparse tensor (", indices.name(), ") index value at position [", i, ",", j, "] out of range."); + } + curr_index = curr_index * sparse_tensor_proto.dims(j) + index_ij; + } + if (curr_index <= prev_index) { + fail_check( + "Sparse tensor (", indices.name(), ") index value at position [", i, "] not in lexicographic sorted order."); + } + prev_index = curr_index; + } +} + +void check_sparse_tensor(const SparseTensorProto& sparse_tensor_proto, const CheckerContext& ctx) { + enforce_has_field(sparse_tensor_proto, values); + + const TensorProto& values = sparse_tensor_proto.values(); + check_tensor(values, ctx); + + // values must be a tensor of shape [NNZ] + // Currently we restrict the value associated with a particular index-tuple + // to be a single value. In the future, if there is a requirement, + // we may extend this to permit the value to be a "sub-tensor", in which + // case values will have dimension > 1. 
+ if (values.dims_size() != 1) { + fail_check("Sparse tensor values (", values.name(), ") must have rank 1."); + } + size_t nnz = static_cast(values.dims(0)); + int dense_rank = sparse_tensor_proto.dims_size(); + if (dense_rank == 0) { + fail_check("Sparse tensor (", values.name(), ") must have a dense-rank > 0"); + } + for (int i = 0; i < dense_rank; ++i) { + if (sparse_tensor_proto.dims(i) <= 0) { + fail_check("Sparse tensor (", values.name(), ") dimensions are not positive."); + } + } + + if (sparse_tensor_proto.has_indices()) { + const TensorProto& indices = sparse_tensor_proto.indices(); + check_tensor(indices, ctx); + if (indices.data_type() != TensorProto::INT64) { + fail_check("Sparse tensor indices (", indices.name(), ") must have INT64 type."); + } + switch (indices.dims().size()) { + case 1: + // Indices in linearized format + check_sparse_tensor_indices_1(indices, sparse_tensor_proto, nnz); + return; + case 2: + // Check COO-style index. E.g., an index for a 3D tensor is a 3-tuple. 
+ check_sparse_tensor_indices_2(indices, sparse_tensor_proto, nnz); + return; + default: + fail_check("Sparse tensor indices (", indices.name(), ") must have rank 1 or 2."); + } + } else if (nnz != 0) { + fail_check("Sparse tensor (", values.name(), ") has no index values."); + } +} + +// NB: This is a generic "attribute well-formedness" check, it doesn't +// actually test if an attribute is valid per a schema +void check_attribute(const AttributeProto& attr, const CheckerContext& ctx, const LexicalScopeContext& lex_ctx) { + enforce_non_empty_field(attr, name); + + if (ctx.get_ir_version() >= 0x00000002) { + enforce_has_field(attr, type); + } + + int used_fields = 0; + +#define check_type(expected_type) \ + if (attr.has_type() && attr.type() != expected_type) { \ + fail_check("type field and data field mismatch in attribute ", attr.name(), "."); \ + } + +#define check_singular_field(field, type) \ + if (attr.has_##field()) { \ + ++used_fields; \ + check_type(type); \ + } + +#define check_repeated_field(field, type) \ + if (attr.field##_size() > 0) { \ + ++used_fields; \ + check_type(type); \ + } + + check_singular_field(f, AttributeProto::FLOAT); + check_singular_field(i, AttributeProto::INT); + check_singular_field(s, AttributeProto::STRING); + check_singular_field(t, AttributeProto::TENSOR); + check_singular_field(g, AttributeProto::GRAPH); + check_singular_field(tp, AttributeProto::TYPE_PROTO); + check_singular_field(sparse_tensor, AttributeProto::SPARSE_TENSOR); + check_repeated_field(floats, AttributeProto::FLOATS); + check_repeated_field(ints, AttributeProto::INTS); + check_repeated_field(strings, AttributeProto::STRINGS); + check_repeated_field(tensors, AttributeProto::TENSORS); + check_repeated_field(graphs, AttributeProto::GRAPHS); + check_repeated_field(sparse_tensors, AttributeProto::SPARSE_TENSORS); + check_repeated_field(type_protos, AttributeProto::TYPE_PROTOS); + +#undef check_type +#undef check_singular_field +#undef check_repeated_field + + // 
Normally, used_fields is expected to be 1. + // In proto3, when the value to be set is type default value (say 0 for + // int), used_fields may be 0. + if (used_fields > 1) { + fail_check("Attribute (name: ", attr.name(), ") should not contain more than one value field."); + } + + if (!ctx.is_main_graph()) { + // It's an attribute of a node in function body. + if (attr.has_ref_attr_name() && used_fields != 0) { + // The attribute proto is supposed to refer to data outside and does not + // have its own value field set. + fail_check("Attribute (name: ", attr.name(), ") should refer to attribute in parent node."); + } + } + + if (attr.has_t()) { + check_tensor(attr.t(), ctx); + } + + if (attr.has_sparse_tensor()) { + check_sparse_tensor(attr.sparse_tensor(), ctx); + } + + if (attr.has_g()) { + CheckerContext subgraph_ctx(ctx); + subgraph_ctx.set_is_main_graph(false); + check_graph(attr.g(), subgraph_ctx, lex_ctx); + } + + for (const auto& tensor : attr.tensors()) { + check_tensor(tensor, ctx); + } + for (const auto& sparse_tensor : attr.sparse_tensors()) { + check_sparse_tensor(sparse_tensor, ctx); + } + if (!attr.graphs().empty()) { + CheckerContext subgraph_ctx(ctx); + subgraph_ctx.set_is_main_graph(false); + for (const auto& graph : attr.graphs()) { + check_graph(graph, subgraph_ctx, lex_ctx); + } + } +} + +static void print_warning_if_has_experimental(const std::unordered_set& used_experimental_ops) { + if (!used_experimental_ops.empty()) { + std::string all_experimental_ops; + for (const auto& op : used_experimental_ops) { + all_experimental_ops += " " + op + ","; + } + // Remove the last comma which is unnecessary + all_experimental_ops.pop_back(); + std::cout << "Warning: Model contains experimental ops:" + all_experimental_ops << '\n'; + } +} + +void check_node(const NodeProto& node, const CheckerContext& ctx, const LexicalScopeContext& lex_ctx) { + enforce_non_empty_field(node, op_type); + + if (node.input().empty() && node.output().empty()) { + 
fail_check("NodeProto (name: ", node.name(), ", type: ", node.op_type(), ") has zero input and zero output."); + } + + // Resolve domain for node + const auto& opset_imports = ctx.get_opset_imports(); + auto dit = opset_imports.find(node.domain()); + if (dit == opset_imports.end()) { + fail_check("No opset import for domain '" + node.domain() + "'"); + } + auto domain_version = dit->second; + + // for ops referencing local functions, there is no schema to verify it. + // will add a check to verify consistency between these ops and local functions. + std::unordered_set seen_attr_names{}; + for (const auto& attr : node.attribute()) { + if (!seen_attr_names.insert(attr.name()).second) { + fail_check("Attribute '", attr.name(), "' appeared multiple times."); + }; + + check_attribute(attr, ctx, lex_ctx); + } + + // This issue will be caught by check_graph instead + if (check_is_experimental_op(node)) { + return; + } + + const auto* schema = ctx.get_schema_registry()->GetSchema(node.op_type(), domain_version, node.domain()); + if (!schema) { + if (node.domain() == ONNX_DOMAIN || node.domain() == AI_ONNX_ML_DOMAIN || node.domain() == "ai.onnx" || + node.domain() == AI_ONNX_TRAINING_DOMAIN || ctx.check_custom_domain()) { + // fail the checker if op is in built-in domains or if it has no schema when `check_custom_domain` is true + fail_check( + "No Op registered for " + node.op_type() + " with domain_version of " + + ONNX_NAMESPACE::to_string(domain_version)); + } + } else if (schema->Deprecated()) { + fail_check( + "Op registered for " + node.op_type() + " is deprecated in domain_version of " + + ONNX_NAMESPACE::to_string(domain_version)); + } else { + schema->Verify(node); + } +} + +void check_graph(const GraphProto& graph, const CheckerContext& ctx, const LexicalScopeContext& parent_lex) { + enforce_non_empty_field(graph, name); + + for (const auto& value_info : graph.input()) { + check_value_info(value_info, ctx); + } + for (const auto& value_info : graph.output()) { + 
check_value_info(value_info, ctx); + } + + // Inherit values available in outer scope + // Note that we do not allow shadowing, so the presence of an already-defined + // name is always an error. + LexicalScopeContext lex_ctx{parent_lex}; + + for (const auto& value_info : graph.input()) { + // TODO: If shadowing isn't allowed, this should maybe use + // this_or_ancestor_graph_has + if (lex_ctx.this_graph_has(value_info.name())) { + fail_check( + "Graph must be in single static assignment (SSA) form, however '", + value_info.name(), + "' has been used as graph input names multiple times."); + } + lex_ctx.add(value_info.name()); + } + + std::unordered_set initializer_name_checker; + + for (const auto& init : graph.initializer()) { + enforce_has_field(init, name); + const auto& name = init.name(); + if (name.empty()) { + fail_check("Tensor initializers must have a non-empty name"); + } + + if (!initializer_name_checker.emplace(name).second) { + fail_check(name + " initializer name is not unique"); + } + + check_tensor(init, ctx); + + if (ctx.get_ir_version() <= 0x00000003) { + // Initializers are a subset of graph inputs for IR_VERSION <= 3 + if (!lex_ctx.this_graph_has(name)) { + fail_check(name + " in initializer but not in graph input"); + } + } else { + // An initializer is allowed to have the same name as an input, + // but is not required to (for IR_VERSION >= 4) + lex_ctx.add(name); + } + } + + for (const auto& sparse_init : graph.sparse_initializer()) { + const auto& values = sparse_init.values(); + enforce_has_field(values, name); + const auto& name = values.name(); + if (name.empty()) { + fail_check("Sparse tensor initializers must have a non-empty name"); + } + if (!initializer_name_checker.insert(name).second) { + fail_check(name + " sparse initializer name is not unique across initializers and sparse_initializers"); + } + check_sparse_tensor(sparse_init, ctx); + lex_ctx.add(name); + } + std::unordered_set used_experimental_ops; + for (const auto& node : 
graph.node()) { + // nodes must be in topologically sorted order + for (const auto& input : node.input()) { + // explicit optional input + if (input.empty()) { + continue; + } + if (!lex_ctx.this_or_ancestor_graph_has(input)) { + fail_check( + "Nodes in a graph must be topologically sorted, however input '", + input, + "' of node: \n", + "name: ", + node.name(), + " OpType: ", + node.op_type(), + "\n is not output of any previous nodes."); + } + } + + if (check_is_experimental_op(node)) { + used_experimental_ops.insert(node.op_type()); + } + + // This needs to happen before SSA check since we don't want to recurse and + // find that outputs from control flow ops are colliding with names in the + // inner block + + ONNX_TRY { + check_node(node, ctx, lex_ctx); + } + ONNX_CATCH(ValidationError & ex) { + ONNX_HANDLE_EXCEPTION([&]() { + ex.AppendContext("Bad node spec for node. Name: " + node.name() + " OpType: " + node.op_type()); + ONNX_THROW_EX(ex); + }); + } + // check for SSA form + for (const auto& output : node.output()) { + // optional output + if (output.empty()) { + continue; + } + + if (lex_ctx.this_or_ancestor_graph_has(output)) { + fail_check( + "Graph must be in single static assignment (SSA) form, however '", + output, + "' has been used as output names multiple times."); + } + lex_ctx.add(output); + } + } + for (const auto& value_info : graph.output()) { + if (!lex_ctx.this_graph_has(value_info.name())) { + fail_check("Graph output '", value_info.name(), "' is not an output of any node in graph."); + } + } + + print_warning_if_has_experimental(used_experimental_ops); +} + +// Utilify function to get the imported version of domain from opset imports +// Returns -1 if requested domain is not found in the opset_imports +static int get_version_for_domain( + const std::string& domain, + const std::unordered_map& opset_imports) { + auto it = opset_imports.find(domain); + if (it == opset_imports.end()) { + return -1; + } + + return it->second; +} + +void 
check_opset_compatibility( + const NodeProto& node, + const CheckerContext& ctx, + const std::unordered_map& func_opset_imports, + const std::unordered_map& model_opset_imports) { + auto func_opset_version = get_version_for_domain(node.domain(), func_opset_imports); + auto model_opset_version = get_version_for_domain(node.domain(), model_opset_imports); + + if (func_opset_version == -1) { + fail_check("No Opset registered for domain " + node.domain()); + } + + if (model_opset_version == -1) { + // model does not include opset import for a node present in function body. + // This is ok as along as the opset import is present in function level opset imports. + return; + } + + if (func_opset_version == model_opset_version) { + // both versions are same, no need to verify schema. + return; + } + + const auto* schema_for_model_import = + ctx.get_schema_registry()->GetSchema(node.op_type(), model_opset_version, node.domain()); + + const auto* schema_for_function_import = + ctx.get_schema_registry()->GetSchema(node.op_type(), func_opset_version, node.domain()); + + if (!schema_for_model_import && !schema_for_function_import) { + // the op belongs to a custom domain so we cannot verify schema + return; + } + + // if schema is present for 1 but not other or the schema since versions do not match then raise an error + if (!schema_for_model_import || !schema_for_function_import || + schema_for_function_import->since_version() != schema_for_model_import->since_version()) { + fail_check( + "Opset import for domain " + node.domain() + " in function op " + node.op_type() + + "is not compatible with the version imported by model. 
FunctionOp imports version " + + ONNX_NAMESPACE::to_string(func_opset_version) + " whereas model imports version " + + ONNX_NAMESPACE::to_string(model_opset_version)); + } +} + +void check_model_local_functions( + const ModelProto& model, + const CheckerContext& ctx, + const LexicalScopeContext& parent_lex) { + // make a copy of model opset imports to maintain a main copy of opset imports across the model and + // all model local functions to verify opset compatibility + std::unordered_map model_opset_imports(ctx.get_opset_imports()); + + // merge the opset imports from every function in model_opset_imports + // only add the opset import if an entry for it does not exist in model_opset_imports + // if there is an entry then the compatibility will be checked later on in check_opset_compatibility + // called by check_function. + for (const auto& function_proto : model.functions()) { + for (const auto& opset_import : function_proto.opset_import()) { + if (get_version_for_domain(opset_import.domain(), model_opset_imports) == -1) { + model_opset_imports[opset_import.domain()] = opset_import.version(); + } + } + } + + CheckerContext ctx_copy = ctx; + ctx_copy.set_opset_imports(model_opset_imports); + + for (const auto& function_proto : model.functions()) { + check_function(function_proto, ctx_copy, parent_lex); + } +} + +void check_function(const FunctionProto& function, const CheckerContext& ctx, const LexicalScopeContext& parent_lex) { + enforce_non_empty_field(function, name); + + if (ctx.get_ir_version() >= 0x00000008) { + enforce_has_field(function, domain); + } + + const auto& model_opset_imports = ctx.get_opset_imports(); + CheckerContext ctx_copy = ctx; + + std::unordered_map func_opset_imports; + for (auto& relied_opset : function.opset_import()) { + func_opset_imports[relied_opset.domain()] = static_cast(relied_opset.version()); + } + + ctx_copy.set_opset_imports(func_opset_imports); + + LexicalScopeContext lex_ctx{parent_lex}; + + for (const auto& input : 
function.input()) { + // TODO: If shadowing isn't allowed, this should maybe use + // this_or_ancestor_graph_has + if (lex_ctx.this_graph_has(input)) { + fail_check( + "Graph must be in single static assignment (SSA) form, however '", input, "' has been used multiple times."); + } + lex_ctx.add(input); + } + + std::unordered_set outputs; + for (const auto& output : function.output()) { + auto result = outputs.insert(output); + if (!result.second) { + fail_check("function (", function.name(), ") should not have duplicate outputs specified."); + } + } + + std::unordered_set attrs; + for (const auto& attr : function.attribute()) { + auto result = attrs.insert(attr); + if (!result.second) { + fail_check("function (", function.name(), ") should not have duplicate attributes specified."); + } + } + std::unordered_set used_experimental_ops; + for (const auto& node : function.node()) { + // nodes must be in topologically sorted order + for (const auto& input : node.input()) { + // explicit optional input + if (input.empty()) { + continue; + } + if (!lex_ctx.this_graph_has(input)) { + fail_check( + "Nodes in a function must be topologically sorted, however input '", + input, + "' of node: \n", + "Name: ", + node.name(), + " OpType: ", + node.op_type(), + "\n is neither output of any previous nodes nor input of the function."); + } + } + + // check whether the opset version imported for a domain by function and model are + // compatible + if (!ctx_copy.skip_opset_compatibility_check()) + check_opset_compatibility(node, ctx_copy, func_opset_imports, model_opset_imports); + if (check_is_experimental_op(node)) { + used_experimental_ops.insert(node.op_type()); + } + check_node(node, ctx_copy, lex_ctx); + + // check for SSA form + for (const auto& output : node.output()) { + // optional output + if (output.empty()) { + continue; + } + if (lex_ctx.this_or_ancestor_graph_has(output)) { + fail_check( + "Function must be in single static assignment (SSA) form, however '", + output, + 
"' has been used as output names multiple times."); + } + lex_ctx.add(output); + } + } + print_warning_if_has_experimental(used_experimental_ops); +} + +static void check_model(const ModelProto& model, CheckerContext& ctx) { + if (!model.ir_version()) { + fail_check("The model does not have an ir_version set properly."); + } + if (model.ir_version() > IR_VERSION) { + fail_check("Your model ir_version ", model.ir_version(), " is higher than the checker's (", IR_VERSION, ")."); + } + if (model.metadata_props_size() > 1) { + std::unordered_set keys; + for (const StringStringEntryProto& entry : model.metadata_props()) { + auto i = keys.insert(entry.key()); + if (!i.second) { + fail_check("Your model has duplicate keys in metadata_props."); + } + } + } + ctx.set_ir_version(static_cast(model.ir_version())); + std::unordered_map opset_imports; + for (const auto& opset_import : model.opset_import()) { + opset_imports[opset_import.domain()] = static_cast(opset_import.version()); + } + if (model.ir_version() >= 3) { + if (opset_imports.empty()) { + fail_check("model with IR version >= 3 must specify opset_import for ONNX"); + } + } else { + if (opset_imports.empty()) + opset_imports[ONNX_DOMAIN] = 1; + else { + fail_check("model with IR version < 3 cannot have opset_import specified"); + } + } + ctx.set_opset_imports(opset_imports); + LexicalScopeContext lex_ctx; + check_graph(model.graph(), ctx, lex_ctx); + + if (ctx.get_ir_version() >= 0x00000008) { + check_model_local_functions(model, ctx, lex_ctx); + // TODO: check consistency between local functions and ops referencing it. 
+ } +} + +void check_model( + const std::string& model_path, + bool full_check, + bool skip_opset_compatibility_check, + bool check_custom_domain) { + ModelProto model; + LoadProtoFromPath(model_path, model); + + CheckerContext ctx; + std::string model_dir; + size_t pos = model_path.find_last_of("\\/"); + if (pos != std::string::npos) { + model_dir = model_path.substr(0, pos + 1); + } + ctx.set_model_dir(model_dir); + ctx.set_skip_opset_compatibility_check(skip_opset_compatibility_check); + ctx.set_check_custom_domain(check_custom_domain); + check_model(model, ctx); + + if (full_check) { + ShapeInferenceOptions options{true, 1, false}; + ONNX_NAMESPACE::shape_inference::InferShapes(model, ctx.get_schema_registry(), options); + } +} + +void check_model( + const ModelProto& model, + bool full_check, + bool skip_opset_compatibility_check, + bool check_custom_domain) { + CheckerContext ctx; + ctx.set_skip_opset_compatibility_check(skip_opset_compatibility_check); + ctx.set_check_custom_domain(check_custom_domain); + check_model(model, ctx); + if (full_check) { + ShapeInferenceOptions options{true, 1, false}; + // Do not update the model in place by the check from shape inference + // because checker should not modify the original model + ModelProto copy = model; + ONNX_NAMESPACE::shape_inference::InferShapes(copy, ctx.get_schema_registry(), options); + } +} + +std::string resolve_external_data_location( + const std::string& base_dir, + const std::string& location, + const std::string& tensor_name) { +#ifdef _WIN32 + std::filesystem::path base_dir_path(utf8str_to_wstring(base_dir)); + std::filesystem::path file_path(utf8str_to_wstring(location)); +#else // POSIX + std::filesystem::path base_dir_path(base_dir); + std::filesystem::path file_path(location); +#endif + if (file_path.empty()) { + fail_check("Location of external TensorProto ( tensor name: ", tensor_name, ") should not be empty."); + } + if (file_path.is_absolute()) { + fail_check( + "Location of external 
TensorProto ( tensor name: ", + tensor_name, + ") should be a relative path, but it is an absolute path: ", + location); + } + auto relative_path = file_path.lexically_normal().make_preferred(); + // Check that normalized relative path doesn't contains ".." +#ifdef _WIN32 + if (relative_path.native().find(L"..", 0) != std::string::npos) { +#else // POSIX + if (relative_path.native().find("..", 0) != std::string::npos) { +#endif + fail_check( + "Data of TensorProto ( tensor name: ", + tensor_name, + ") should be file inside the ", + base_dir, + ", but the '", + location, + "' points outside the directory"); + } + auto data_path = base_dir_path / relative_path; +#ifdef _WIN32 + auto data_path_str = wstring_to_utf8str(data_path.native()); +#else + auto data_path_str = data_path.native(); +#endif + // Check whether the file exists + if (data_path.empty() || (data_path_str[0] != '#' && !std::filesystem::exists(data_path))) { + fail_check( + "Data of TensorProto ( tensor name: ", + tensor_name, + ") should be stored in ", + data_path_str, + ", but it doesn't exist or is not accessible."); + } + // Do not allow symlinks or directories. 
+ if (data_path.empty() || (data_path_str[0] != '#' && !std::filesystem::is_regular_file(data_path))) { + fail_check( + "Data of TensorProto ( tensor name: ", + tensor_name, + ") should be stored in ", + data_path_str, + ", but it is not regular file."); + } + return data_path_str; +} + +static std::unordered_set experimental_ops = { + "ATen", + "Affine", + "ConstantFill", + "Crop", + "DynamicSlice", + "GRUUnit", + "GivenTensorFill", + "ImageScaler", + "ParametricSoftplus", + "Scale", + "ScaledTanh"}; + +bool check_is_experimental_op(const NodeProto& node) { + return (node.domain() == ONNX_DOMAIN || node.domain() == "ai.onnx") && experimental_ops.count(node.op_type()); +} + +#undef enforce_has_field +#undef enforce_non_empty_field + +} // namespace checker +} // namespace ONNX_NAMESPACE diff --git a/pythonProject/.venv/Lib/site-packages/onnx/checker.h b/pythonProject/.venv/Lib/site-packages/onnx/checker.h new file mode 100644 index 0000000000000000000000000000000000000000..e836f1e115389a03051c73edc15e6f17c140d12e --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/checker.h @@ -0,0 +1,190 @@ +// Copyright (c) ONNX Project Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include +#include +#include +#include + +#include "onnx/defs/schema.h" +#include "onnx/onnx-data.pb.h" +#include "onnx/string_utils.h" + +namespace ONNX_NAMESPACE { +namespace checker { +class ValidationError final : public std::runtime_error { + public: + using std::runtime_error::runtime_error; + const char* what() const noexcept override { + if (!expanded_message_.empty()) { + return expanded_message_.c_str(); + } + return std::runtime_error::what(); + } + void AppendContext(const std::string& context) { + expanded_message_ = ONNX_NAMESPACE::MakeString(std::runtime_error::what(), "\n\n==> Context: ", context); + } + + private: + std::string expanded_message_; +}; + +#define fail_check(...) 
\ + ONNX_THROW_EX(ONNX_NAMESPACE::checker::ValidationError(ONNX_NAMESPACE::MakeString(__VA_ARGS__))); + +class CheckerContext final { + public: + int get_ir_version() const { + return ir_version_; + } + void set_ir_version(int v) { + ir_version_ = v; + } + const std::unordered_map& get_opset_imports() const { + return opset_imports_; + } + void set_opset_imports(std::unordered_map imps) { + opset_imports_ = std::move(imps); + } + bool is_main_graph() const { + return is_main_graph_; + } + void set_is_main_graph(bool is_main_graph) { + is_main_graph_ = is_main_graph; + } + + void set_schema_registry(const ISchemaRegistry* schema_registry) { + schema_registry_ = schema_registry; + } + + const ISchemaRegistry* get_schema_registry() const { + return schema_registry_; + } + + void set_model_dir(const std::string& model_dir) { + model_dir_ = model_dir; + } + + std::string get_model_dir() const { + return model_dir_; + } + + bool skip_opset_compatibility_check() const { + return skip_opset_compatibility_check_; + } + + void set_skip_opset_compatibility_check(bool value) { + skip_opset_compatibility_check_ = value; + } + + bool check_custom_domain() const { + return check_custom_domain_; + } + + void set_check_custom_domain(bool value) { + check_custom_domain_ = value; + } + + explicit CheckerContext() = default; + + private: + int ir_version_{-1}; + std::unordered_map opset_imports_; + bool is_main_graph_ = true; + const ISchemaRegistry* schema_registry_ = OpSchemaRegistry::Instance(); + std::string model_dir_; + bool skip_opset_compatibility_check_ = false; + bool check_custom_domain_ = false; +}; + +class LexicalScopeContext final { + public: + LexicalScopeContext() = default; + ~LexicalScopeContext() = default; + + // Construct an instance with the lexical scope from the parent graph to allow + // lookup of names from that scope via this_or_ancestor_graph_has. + // The caller must ensure parent_context remains valid for the entire lifetime + // of the new instance. 
Alternatively, if that cannot be guaranteed, create an + // instance with the default constructor and populate output_names with the + // values from the parent scope so the values are copied instead. + LexicalScopeContext(const LexicalScopeContext& parent_context) : parent_context_{&parent_context} {} + LexicalScopeContext& operator=(const LexicalScopeContext& parent_context) { + if (this == &parent_context) { + return *this; + } + parent_context_ = &parent_context; + return *this; + } + LexicalScopeContext(LexicalScopeContext&&) = delete; + LexicalScopeContext& operator=(LexicalScopeContext&&) = delete; + + void add(const std::string& name) { + output_names.insert(name); + } + + bool this_graph_has(const std::string& name) const { + return output_names.count(name) > 0; + } + + bool this_or_ancestor_graph_has(const std::string& name) const { + return this_graph_has(name) || (parent_context_ && parent_context_->this_or_ancestor_graph_has(name)); + } + + // public for backwards compatibility. 
please prefer the public interface of + // this class over directly changing output_names + std::unordered_set output_names; + + private: + const LexicalScopeContext* parent_context_{nullptr}; +}; + +using IR_VERSION_TYPE = decltype(Version::IR_VERSION); +void check_value_info(const ValueInfoProto& value_info, const CheckerContext&); +void check_tensor(const TensorProto& tensor, const CheckerContext&); +void check_sparse_tensor(const SparseTensorProto& sparse_tensor, const CheckerContext&); +void check_sequence(const SequenceProto& sequence, const CheckerContext&); +void check_map(const MapProto& map, const CheckerContext&); +void check_optional(const OptionalProto& opt, const CheckerContext&); +void check_attribute(const AttributeProto& attr, const CheckerContext&, const LexicalScopeContext&); +void check_node(const NodeProto& node, const CheckerContext&, const LexicalScopeContext&); +void check_graph(const GraphProto& graph, const CheckerContext&, const LexicalScopeContext&); +void check_function(const FunctionProto& function, const CheckerContext&, const LexicalScopeContext&); + +// Check schema compatibility for 2 opset versions for a given node. +// Checks whether the schema for 2 versions is same, this is true when the opschema +// does not change between versions. 
+void check_opset_compatibility( + const NodeProto& node, + const CheckerContext& ctx, + const std::unordered_map& func_opset_imports, + const std::unordered_map& model_opset_imports); + +// Checks all model local functions present in ModelProto +void check_model_local_functions( + const ModelProto& model, + const CheckerContext& ctx, + const LexicalScopeContext& parent_lex); + +void check_model( + const ModelProto& model, + bool full_check = false, + bool skip_opset_compatibility_check = false, + bool check_custom_domain = false); +void check_model( + const std::string& model_path, + bool full_check = false, + bool skip_opset_compatibility_check = false, + bool check_custom_domain = false); +std::string resolve_external_data_location( + const std::string& base_dir, + const std::string& location, + const std::string& tensor_name); +bool check_is_experimental_op(const NodeProto& node); + +} // namespace checker +} // namespace ONNX_NAMESPACE diff --git a/pythonProject/.venv/Lib/site-packages/onnx/checker.py b/pythonProject/.venv/Lib/site-packages/onnx/checker.py new file mode 100644 index 0000000000000000000000000000000000000000..b823adb44a31ba411b728b45d3da84ba77cba0d5 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/checker.py @@ -0,0 +1,172 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 +"""Graph utilities for checking whether an ONNX proto message is legal.""" + +from __future__ import annotations + +__all__ = [ + "check_attribute", + "check_function", + "check_graph", + "check_model", + "check_node", + "check_sparse_tensor", + "check_tensor", + "check_value_info", + "DEFAULT_CONTEXT", + "LEXICAL_SCOPE_CONTEXT", + "ValidationError", + "C", + "MAXIMUM_PROTOBUF", +] + +import os +import sys +from typing import TYPE_CHECKING + +import onnx.defs +import onnx.onnx_cpp2py_export.checker as C # noqa: N812 +from onnx.onnx_pb import IR_VERSION + +if TYPE_CHECKING: + from google.protobuf.message import Message + +# 
Limitation of single protobuf file is 2GiB +MAXIMUM_PROTOBUF = 2147483648 + + +# NB: Please don't edit this context! +DEFAULT_CONTEXT = C.CheckerContext() +DEFAULT_CONTEXT.ir_version = IR_VERSION +# TODO: Maybe ONNX-ML should also be defaulted? +DEFAULT_CONTEXT.opset_imports = {"": onnx.defs.onnx_opset_version()} + +LEXICAL_SCOPE_CONTEXT = C.LexicalScopeContext() + + +def _ensure_proto_type(proto: Message, proto_type: type[Message]) -> None: + if not isinstance(proto, proto_type): + raise TypeError( + f"The proto message needs to be of type '{proto_type.__name__}'" + ) + + +def check_value_info( + value_info: onnx.ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT +) -> None: + _ensure_proto_type(value_info, onnx.ValueInfoProto) + return C.check_value_info(value_info.SerializeToString(), ctx) + + +def check_tensor( + tensor: onnx.TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT +) -> None: + _ensure_proto_type(tensor, onnx.TensorProto) + return C.check_tensor(tensor.SerializeToString(), ctx) + + +def check_attribute( + attr: onnx.AttributeProto, + ctx: C.CheckerContext = DEFAULT_CONTEXT, + lexical_scope_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT, +) -> None: + _ensure_proto_type(attr, onnx.AttributeProto) + return C.check_attribute(attr.SerializeToString(), ctx, lexical_scope_ctx) + + +def check_node( + node: onnx.NodeProto, + ctx: C.CheckerContext = DEFAULT_CONTEXT, + lexical_scope_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT, +) -> None: + _ensure_proto_type(node, onnx.NodeProto) + return C.check_node(node.SerializeToString(), ctx, lexical_scope_ctx) + + +def check_function( + function: onnx.FunctionProto, + ctx: C.CheckerContext | None = None, + lexical_scope_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT, +) -> None: + _ensure_proto_type(function, onnx.FunctionProto) + if ctx is None: + ctx = C.CheckerContext() + ctx.ir_version = onnx.helper.find_min_ir_version_for( + function.opset_import, ignore_unknown=True + ) + ctx.opset_imports 
= { + domain_version.domain: domain_version.version + for domain_version in function.opset_import + } + C.check_function(function.SerializeToString(), ctx, lexical_scope_ctx) + + +def check_graph( + graph: onnx.GraphProto, + ctx: C.CheckerContext = DEFAULT_CONTEXT, + lexical_scope_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT, +) -> None: + _ensure_proto_type(graph, onnx.GraphProto) + return C.check_graph(graph.SerializeToString(), ctx, lexical_scope_ctx) + + +def check_sparse_tensor( + sparse: onnx.SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT +) -> None: + _ensure_proto_type(sparse, onnx.SparseTensorProto) + C.check_sparse_tensor(sparse.SerializeToString(), ctx) + + +def check_model( + model: onnx.ModelProto | str | bytes | os.PathLike, + full_check: bool = False, + skip_opset_compatibility_check: bool = False, + check_custom_domain: bool = False, +) -> None: + """Check the consistency of a model. + + An exception will be raised if the model's ir_version is not set + properly or is higher than checker's ir_version, or if the model + has duplicate keys in metadata_props. + + If IR version >= 3, the model must specify opset_import. + If IR version < 3, the model cannot have any opset_import specified. + + Args: + model: Model to check. If model is a path, the function checks model + path first. If the model bytes size is larger than 2GB, function + should be called using model path. + full_check: If True, the function also runs shape inference check. + skip_opset_compatibility_check: If True, the function skips the check for + opset compatibility. + check_custom_domain: If True, the function will check all domains. Otherwise + only check built-in domains. 
+ """ + # If model is a path instead of ModelProto + if isinstance(model, (str, os.PathLike)): + C.check_model_path( + os.fspath(model), + full_check, + skip_opset_compatibility_check, + check_custom_domain, + ) + else: + protobuf_string = ( + model if isinstance(model, bytes) else model.SerializeToString() + ) + # If the protobuf is larger than 2GiB, + # remind users should use the model path to check + if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF: + raise ValueError( + "This protobuf of onnx model is too large (>2GiB). Call check_model with model path instead." + ) + C.check_model( + protobuf_string, + full_check, + skip_opset_compatibility_check, + check_custom_domain, + ) + + +ValidationError = C.ValidationError diff --git a/pythonProject/.venv/Lib/site-packages/onnx/compose.py b/pythonProject/.venv/Lib/site-packages/onnx/compose.py new file mode 100644 index 0000000000000000000000000000000000000000..d6c76abf5d4835e1e89f46503d62e68b6f1ff797 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/compose.py @@ -0,0 +1,740 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +from typing import TYPE_CHECKING + +from onnx import ( + AttributeProto, + GraphProto, + ModelProto, + TensorProto, + checker, + helper, + utils, +) + +if TYPE_CHECKING: + from collections.abc import MutableMapping + + +def check_overlapping_names( + g1: GraphProto, g2: GraphProto, io_map: list[tuple[str, str]] | None = None +) -> list[tuple[str, list[str]]]: + """Checks whether there are name collisions between two graphs + + Returns a list of tuples where the first element represents the member containing overlapping names + (One of: "node", "edge", "value_info", "initializer", "sparse_initializer"), and the + second element contains a list of names that appear in both graphs on that category. + + Optionally, it takes an io_map, representing the output/inputs to be connected. 
It provided, overlapping + present in the io_map argument will be ignored. + """ + if not isinstance(g1, GraphProto): + raise TypeError("g1 argument is not an ONNX graph") + if not isinstance(g2, GraphProto): + raise TypeError("g2 argument is not an ONNX graph") + + def _overlapping(c1: list[str], c2: list[str]) -> list[str]: + return list(set(c1) & set(c2)) + + def _edge_names(graph: GraphProto, exclude: set[str] | None = None) -> list[str]: + if exclude is None: + exclude = set() + edges = [] + for n in graph.node: + for i in n.input: + if i != "" and i not in exclude: + edges.append(i) # noqa: PERF401 + for o in n.output: + if o != "" and o not in exclude: + edges.append(o) # noqa: PERF401 + return edges + + result = [] + + if not io_map: + io_map = [] + io_map_inputs = {elem[1] for elem in io_map} + + # Edges already cover input/output + overlap = _overlapping(_edge_names(g1), _edge_names(g2, exclude=io_map_inputs)) + if overlap: + result.append(("edge", overlap)) + + overlap = _overlapping( + [e.name for e in g1.value_info], [e.name for e in g2.value_info] + ) + if overlap: + result.append(("value_info", overlap)) + + overlap = _overlapping( + [e.name for e in g1.initializer], [e.name for e in g2.initializer] + ) + if overlap: + result.append(("initializer", overlap)) + + overlap = _overlapping( + [e.values.name for e in g1.sparse_initializer], + [e.values.name for e in g2.sparse_initializer], + ) + _overlapping( + [e.indices.name for e in g1.sparse_initializer], + [e.indices.name for e in g2.sparse_initializer], + ) + if overlap: + result.append(("sparse_initializer", overlap)) + + return result + + +def merge_graphs( + g1: GraphProto, + g2: GraphProto, + io_map: list[tuple[str, str]], + inputs: list[str] | None = None, + outputs: list[str] | None = None, + prefix1: str | None = None, + prefix2: str | None = None, + name: str | None = None, + doc_string: str | None = None, +) -> GraphProto: + """Combines two ONNX graphs into a single one. 
+ + The combined graph is defined by connecting the specified set of outputs/inputs. Those inputs/outputs + not specified in the io_map argument will remain as inputs/outputs of the combined graph. + + Arguments: + g1 (GraphProto): First graph + g2 (GraphProto): Second graph + io_map (list of pairs of string): The pairs of names [(out0, in0), (out1, in1), ...] + representing outputs of the first graph and inputs of the second + to be connected + inputs (list of string): Optional list of inputs to be included in the combined graph + By default, all inputs not present in the ``io_map`` argument will be + included in the combined model + outputs (list of string): Optional list of outputs to be included in the combined graph + By default, all outputs not present in the ``io_map`` argument will be + included in the combined model + prefix1 (string): Optional prefix to be added to all names in g1 + prefix2 (string): Optional prefix to be added to all names in g2 + name (string): Optional name for the combined graph + By default, the name is g1.name and g2.name concatenated with an underscore delimiter + doc_string (string): Optional docstring for the combined graph + If not provided, a default docstring with the concatenation of g1 and g2 docstrings is used + + Returns: + GraphProto + """ + if not isinstance(g1, GraphProto): + raise TypeError("g1 argument is not an ONNX graph") + if not isinstance(g2, GraphProto): + raise TypeError("g2 argument is not an ONNX graph") + + # Prefixing names in the graph if requested, adjusting io_map accordingly + if prefix1 or prefix2: + if prefix1: + g1_copy = GraphProto() + g1_copy.CopyFrom(g1) + g1 = g1_copy + g1 = add_prefix_graph(g1, prefix=prefix1) + if prefix2: + g2_copy = GraphProto() + g2_copy.CopyFrom(g2) + g2 = g2_copy + g2 = add_prefix_graph(g2, prefix=prefix2) + io_map = [ + ( + prefix1 + io[0] if prefix1 else io[0], + prefix2 + io[1] if prefix2 else io[1], + ) + for io in io_map + ] + + io_map_g1_outs = {io[0] for io in 
io_map} + io_map_g2_ins = {io[1] for io in io_map} + reversed_io_map = {in_name: out_name for out_name, in_name in io_map} + g1_outs = {o.name for o in g1.output} + g2_ins = {i.name for i in g2.input} + + # If necessary extract subgraphs + if inputs or outputs: + if not inputs: + g1_inputs = [i.name for i in g1.input] + g2_inputs = [i.name for i in g2.input] + else: + input_set = set(inputs) + g1_inputs = [i.name for i in g1.input if i.name in input_set] + g2_inputs = [ + i.name + for i in g2.input + if i.name in input_set or i.name in io_map_g2_ins + ] + + if not outputs: + g1_outputs = [o.name for o in g1.output] + g2_outputs = [o.name for o in g2.output] + else: + output_set = set(outputs) + g1_outputs = [ + o.name + for o in g1.output + if o.name in output_set or o.name in io_map_g1_outs + ] + g2_outputs = [o.name for o in g2.output if o.name in output_set] + + if len(g1_inputs) < len(g1.input) or len(g1_outputs) < len(g1.output): + e1 = utils.Extractor(helper.make_model(g1)) + g1 = e1.extract_model(g1_inputs, g1_outputs).graph + + if len(g2_inputs) < len(g2.input) or len(g2_outputs) < len(g2.output): + e2 = utils.Extractor(helper.make_model(g2)) + g2 = e2.extract_model(g2_inputs, g2_outputs).graph + + # Check that input/output names specified in the io_map argument are valid input/output names + for g1_out_name, g2_in_name in io_map: + if g1_out_name not in g1_outs: + raise ValueError(f"Output {g1_out_name} is not present in g1") + if g2_in_name not in g2_ins: + raise ValueError(f"Input {g2_in_name} is not present in g2") + + # Check for name collision + overlapping_names = check_overlapping_names(g1, g2, io_map) + if len(overlapping_names) > 0: + category, names = overlapping_names[0] + raise ValueError( + "Cant merge two graphs with overlapping names. " + f"Found repeated {category} names: " + + ", ".join(names) + + "\n" + + "Consider using ``onnx.compose.add_prefix`` to add a prefix to names in one of the graphs." 
+ ) + + g = GraphProto() + + g.node.extend(g1.node) + g2_nodes_begin = len(g.node) + g.node.extend(g2.node) + g2_nodes_end = len(g.node) + + # Search inputs of the subgraph recursively + def connect_io(sub_graph: GraphProto, start: int, end: int) -> None: + for node_idx in range(start, end): + node = sub_graph.node[node_idx] + for attr in node.attribute: + if attr.type == AttributeProto.GRAPH: + connect_io(attr.g, 0, len(attr.g.node)) + + for index, name_ in enumerate(node.input): + if name_ in reversed_io_map: + node.input[index] = reversed_io_map[name_] + + # Connecting outputs of the first graph with the inputs of the second + connect_io(g, g2_nodes_begin, g2_nodes_end) + + if inputs: + input_set = set(inputs) + g.input.extend([i for i in g1.input if i.name in input_set]) + g.input.extend([i for i in g2.input if i.name in input_set]) + else: + g.input.extend(g1.input) + g.input.extend([i for i in g2.input if i.name not in io_map_g2_ins]) + + if outputs: + output_set = set(outputs) + g.output.extend([o for o in g1.output if o.name in output_set]) + g.output.extend([o for o in g2.output if o.name in output_set]) + else: + g.output.extend([o for o in g1.output if o.name not in io_map_g1_outs]) + g.output.extend(g2.output) + + g.initializer.extend(g1.initializer) + g.initializer.extend( + [init for init in g2.initializer if init.name not in io_map_g2_ins] + ) + + g.sparse_initializer.extend(g1.sparse_initializer) + g.sparse_initializer.extend( + [ + init + for init in g2.sparse_initializer + if init.values.name not in io_map_g2_ins + ] + ) + + g.value_info.extend(g1.value_info) + g.value_info.extend([vi for vi in g2.value_info if vi.name not in io_map_g2_ins]) + + g.name = name if name is not None else "_".join([g1.name, g2.name]) + + if doc_string is None: + doc_string = ( + f"Graph combining {g1.name} and {g2.name}\n" + + g1.name + + "\n\n" + + g1.doc_string + + "\n\n" + + g2.name + + "\n\n" + + g2.doc_string + ) + g.doc_string = doc_string + + return g + + +def 
merge_models( + m1: ModelProto, + m2: ModelProto, + io_map: list[tuple[str, str]], + inputs: list[str] | None = None, + outputs: list[str] | None = None, + prefix1: str | None = None, + prefix2: str | None = None, + name: str | None = None, + doc_string: str | None = None, + producer_name: str | None = "onnx.compose.merge_models", + producer_version: str | None = "1.0", + domain: str | None = "", + model_version: int | None = 1, +) -> ModelProto: + """Combines two ONNX models into a single one. + + The combined model is defined by connecting the specified set of outputs/inputs. + Those inputs/outputs not specified in the io_map argument will remain as + inputs/outputs of the combined model. + + Both models should have the same IR version, and same operator sets imported. + + Arguments: + m1 (ModelProto): First model + m2 (ModelProto): Second model + io_map (list of pairs of string): The pairs of names [(out0, in0), (out1, in1), ...] + representing outputs of the first graph and inputs of the second + to be connected + inputs (list of string): Optional list of inputs to be included in the combined graph + By default, all inputs not present in the ``io_map`` argument will be + included in the combined model + outputs (list of string): Optional list of outputs to be included in the combined graph + By default, all outputs not present in the ``io_map`` argument will be + included in the combined model + prefix1 (string): Optional prefix to be added to all names in m1 + prefix2 (string): Optional prefix to be added to all names in m2 + name (string): Optional name for the combined graph + By default, the name is g1.name and g2.name concatenated with an underscore delimiter + doc_string (string): Optional docstring for the combined graph + If not provided, a default docstring with the concatenation of g1 and g2 docstrings is used + producer_name (string): Optional producer name for the combined model. 
Default: 'onnx.compose' + producer_version (string): Optional producer version for the combined model. Default: "1.0" + domain (string): Optional domain of the combined model. Default: "" + model_version (int): Optional version of the graph encoded. Default: 1 + + Returns: + ModelProto + """ + if not isinstance(m1, ModelProto): + raise TypeError("m1 argument is not an ONNX model") + if not isinstance(m2, ModelProto): + raise TypeError("m2 argument is not an ONNX model") + + if m1.ir_version != m2.ir_version: + raise ValueError( + f"IR version mismatch {m1.ir_version} != {m2.ir_version}." + " Both models should have the same IR version" + ) + ir_version = m1.ir_version + + opset_import_map: MutableMapping[str, int] = {} + opset_imports = list(m1.opset_import) + list(m2.opset_import) + + for entry in opset_imports: + if entry.domain in opset_import_map: + found_version = opset_import_map[entry.domain] + if entry.version != found_version: + raise ValueError( + "Can't merge two models with different operator set ids for a given domain. 
" + f"Got: {m1.opset_import} and {m2.opset_import}" + ) + else: + opset_import_map[entry.domain] = entry.version + + # Prefixing names in the graph if requested, adjusting io_map accordingly + if prefix1 or prefix2: + if prefix1: + m1_copy = ModelProto() + m1_copy.CopyFrom(m1) + m1 = m1_copy + m1 = add_prefix(m1, prefix=prefix1) + if prefix2: + m2_copy = ModelProto() + m2_copy.CopyFrom(m2) + m2 = m2_copy + m2 = add_prefix(m2, prefix=prefix2) + io_map = [ + ( + prefix1 + io[0] if prefix1 else io[0], + prefix2 + io[1] if prefix2 else io[1], + ) + for io in io_map + ] + + graph = merge_graphs( + m1.graph, + m2.graph, + io_map, + inputs=inputs, + outputs=outputs, + name=name, + doc_string=doc_string, + ) + model = helper.make_model( + graph, + producer_name=producer_name, + producer_version=producer_version, + domain=domain, + model_version=model_version, + opset_imports=opset_imports, + ir_version=ir_version, + ) + + # Merging model metadata props + model_props = {} + for meta_entry in m1.metadata_props: + model_props[meta_entry.key] = meta_entry.value + for meta_entry in m2.metadata_props: + if meta_entry.key in model_props: + value = model_props[meta_entry.key] + if value != meta_entry.value: + raise ValueError( + "Can't merge models with different values for the same model metadata property." + f" Found: property = {meta_entry.key}, with values {value} and {meta_entry.value}." + ) + else: + model_props[meta_entry.key] = meta_entry.value + helper.set_model_props(model, model_props) + + # Merging functions + function_overlap = list( + {f.name for f in m1.functions} & {f.name for f in m2.functions} + ) + if function_overlap: + raise ValueError( + "Can't merge models with overlapping local function names." 
+ " Found in both graphs: " + ", ".join(function_overlap) + ) + model.functions.MergeFrom(m1.functions) + model.functions.MergeFrom(m2.functions) + + checker.check_model(model) + return model + + +def add_prefix_graph( + graph: GraphProto, + prefix: str, + rename_nodes: bool | None = True, + rename_edges: bool | None = True, + rename_inputs: bool | None = True, + rename_outputs: bool | None = True, + rename_initializers: bool | None = True, + rename_value_infos: bool | None = True, + inplace: bool | None = False, + name_map: dict[str, str] | None = None, +) -> GraphProto: + """Adds a prefix to names of elements in a graph: nodes, edges, inputs, outputs, + initializers, sparse initializer, value infos. + + It can be used as a utility before merging graphs that have overlapping names. + Empty names are not prefixed. + + Arguments: + graph (GraphProto): Graph + prefix (str): Prefix to be added to each name in the graph + rename_nodes (bool): Whether to prefix node names + rename_edges (bool): Whether to prefix node edge names + rename_inputs (bool): Whether to prefix input names + rename_outputs (bool): Whether to prefix output names + rename_initializers (bool): Whether to prefix initializer and sparse initializer names + rename_value_infos (bool): Whether to prefix value info names + inplace (bool): If True, mutates the graph directly. + Otherwise, a copy will be created + name_map: (Dict): shared name_map in subgraph + + Returns: + GraphProto + """ + if not isinstance(graph, GraphProto): + raise TypeError("graph argument is not an ONNX graph") + + if not inplace: + g = GraphProto() + g.CopyFrom(graph) + else: + g = graph + + def _prefixed(prefix: str, name: str) -> str: + return prefix + name if len(name) > 0 else name + + if name_map is None: + name_map = {} + + if rename_edges: + # See https://github.com/onnx/onnx/pull/6869#issuecomment-2852719536. + # Consider only intermediate nodes, that are not connected to graph outputs. 
+ # Rename graph inputs or outputs separately based on rename_inputs/rename_outputs flags. + graph_output_names = {o.name for o in g.output} + for n in g.node: + for e in n.output: + if e not in graph_output_names: + name_map[e] = _prefixed(prefix, e) + + if rename_inputs: + for entry in g.input: + name_map[entry.name] = _prefixed(prefix, entry.name) + if rename_outputs: + for entry in g.output: + name_map[entry.name] = _prefixed(prefix, entry.name) + + if rename_nodes: + for n in g.node: + n.name = _prefixed(prefix, n.name) + for attribute in n.attribute: + if attribute.g: + add_prefix_graph( + attribute.g, prefix, inplace=True, name_map=name_map + ) + + if rename_initializers: + for init in g.initializer: + name_map[init.name] = _prefixed(prefix, init.name) + for sparse_init in g.sparse_initializer: + name_map[sparse_init.values.name] = _prefixed( + prefix, sparse_init.values.name + ) + name_map[sparse_init.indices.name] = _prefixed( + prefix, sparse_init.indices.name + ) + + if rename_value_infos: + for entry in g.value_info: + name_map[entry.name] = _prefixed(prefix, entry.name) + + for n in g.node: + for i, output in enumerate(n.output): + if n.output[i] in name_map: + n.output[i] = name_map[output] + for i, input_ in enumerate(n.input): + if n.input[i] in name_map: + n.input[i] = name_map[input_] + + for in_desc in g.input: + if in_desc.name in name_map: + in_desc.name = name_map[in_desc.name] + for out_desc in g.output: + if out_desc.name in name_map: + out_desc.name = name_map[out_desc.name] + + for initializer in g.initializer: + if initializer.name in name_map: + initializer.name = name_map[initializer.name] + for sparse_initializer in g.sparse_initializer: + if sparse_initializer.values.name in name_map: + sparse_initializer.values.name = name_map[sparse_initializer.values.name] + if sparse_initializer.indices.name in name_map: + sparse_initializer.indices.name = name_map[sparse_initializer.indices.name] + + for value_info in g.value_info: + if 
value_info.name in name_map: + value_info.name = name_map[value_info.name] + + return g + + +def add_prefix( + model: ModelProto, + prefix: str, + rename_nodes: bool | None = True, + rename_edges: bool | None = True, + rename_inputs: bool | None = True, + rename_outputs: bool | None = True, + rename_initializers: bool | None = True, + rename_value_infos: bool | None = True, + rename_functions: bool | None = True, + inplace: bool | None = False, +) -> ModelProto: + """Adds a prefix to names of elements in a graph: nodes, edges, inputs, outputs, + initializers, sparse initializer, value infos, and local functions. + + It can be used as a utility before merging graphs that have overlapping names. + Empty names are not _prefixed. + + Arguments: + model (ModelProto): Model + prefix (str): Prefix to be added to each name in the graph + rename_nodes (bool): Whether to prefix node names + rename_edges (bool): Whether to prefix node edge names + rename_inputs (bool): Whether to prefix input names + rename_outputs (bool): Whether to prefix output names + rename_initializers (bool): Whether to prefix initializer and sparse initializer names + rename_value_infos (bool): Whether to prefix value info nanes + rename_functions (bool): Whether to prefix local function names + inplace (bool): If True, mutates the model directly. 
+ Otherwise, a copy will be created + + Returns: + ModelProto + """ + if not isinstance(model, ModelProto): + raise TypeError("model argument is not an ONNX model") + + if not inplace: + m = ModelProto() + m.CopyFrom(model) + model = m + + add_prefix_graph( + model.graph, + prefix, + rename_nodes=rename_nodes, + rename_edges=rename_edges, + rename_inputs=rename_inputs, + rename_outputs=rename_outputs, + rename_initializers=rename_initializers, + rename_value_infos=rename_value_infos, + inplace=True, # No need to create a copy, since it's a new model + ) + + if rename_functions: + f_name_map = {} + for f in model.functions: + new_f_name = prefix + f.name + f_name_map[f.name] = new_f_name + f.name = new_f_name + # Adjust references to local functions in other local function + # definitions + for f in model.functions: + for n in f.node: + if n.op_type in f_name_map: + n.op_type = f_name_map[n.op_type] + # Adjust references to local functions in the graph + for n in model.graph.node: + if n.op_type in f_name_map: + n.op_type = f_name_map[n.op_type] + + return model + + +def expand_out_dim_graph( + graph: GraphProto, + dim_idx: int, + inplace: bool | None = False, +) -> GraphProto: + """Inserts an extra dimension with extent 1 to each output in the graph. + + Inserts an Unsqueeze node for each output. It can be used as a utility before merging graphs, + for example when the second one expects a batch dimension. + + Arguments: + graph (GraphProto): Graph + dim_idx (int): Index of the dimension to be inserted. + A negative value means counting dimensions from the back. + inplace (bool): If True, mutates the model directly. 
+ Otherwise, a copy will be created + + Returns: + GraphProto + """ + if not isinstance(graph, GraphProto): + raise TypeError("graph argument is not an ONNX graph") + + if not inplace: + g = GraphProto() + g.CopyFrom(graph) + else: + g = graph + + orig_out_names = [output.name for output in g.output] + + for n in g.node: + for i, out in enumerate(n.output): + if out in orig_out_names: + n.output[i] = out + f"_collapsed_dim_{dim_idx}" + for i, inp in enumerate(n.input): + if inp in orig_out_names: + n.input[i] = inp + f"_collapsed_dim_{dim_idx}" + + expand_dim_k = g.name + "_expand_out_dim_idx" + g.node.append( + helper.make_node( + "Constant", + inputs=[], + outputs=[expand_dim_k], + name=f"{expand_dim_k}-constant", + value=helper.make_tensor( + name=f"{expand_dim_k}-value", + data_type=TensorProto.INT64, + dims=[ + 1, + ], + vals=[ + dim_idx, + ], + ), + ) + ) + + for _ in range(len(g.output)): + o = g.output.pop(0) + prev_output = o.name + f"_collapsed_dim_{dim_idx}" + g.node.append( + helper.make_node( + "Unsqueeze", + inputs=[prev_output, expand_dim_k], + outputs=[o.name], + name=f"unsqueeze-{o.name}", + ) + ) + new_shape = [d.dim_value for d in o.type.tensor_type.shape.dim] + new_shape.insert(dim_idx, 1) + g.output.append( + helper.make_tensor_value_info( + o.name, o.type.tensor_type.elem_type, new_shape + ) + ) + return g + + +def expand_out_dim( + model: ModelProto, + dim_idx: int, + inplace: bool | None = False, +) -> ModelProto: + """Inserts an extra dimension with extent 1 to each output in the graph. + + Inserts an Unsqueeze node for each output. It can be used as a utility before merging graphs, + for example when the second one expects a batch dimension. + + Arguments: + model (ModelProto): Model + dim_idx (int): Index of the dimension to be inserted. + A negative value means counting dimensions from the back. + inplace (bool): If True, mutates the model directly. 
+ Otherwise, a copy will be created + + Returns: + ModelProto + """ + if not isinstance(model, ModelProto): + raise TypeError("model argument is not an ONNX model") + + if not inplace: + m = ModelProto() + m.CopyFrom(model) + model = m + + expand_out_dim_graph( + model.graph, + dim_idx, + inplace=True, # No need to create a copy, since it's a new model + ) + return model diff --git a/pythonProject/.venv/Lib/site-packages/onnx/cpp2py_export.cc b/pythonProject/.venv/Lib/site-packages/onnx/cpp2py_export.cc new file mode 100644 index 0000000000000000000000000000000000000000..45ef5f1f9330332e2b1638ce53783bf68517ce3d --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/cpp2py_export.cc @@ -0,0 +1,856 @@ +// Copyright (c) ONNX Project Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "onnx/checker.h" +#include "onnx/common/ir_pb_converter.h" +#include "onnx/defs/parser.h" +#include "onnx/defs/printer.h" +#include "onnx/defs/schema.h" +#include "onnx/defs/shape_inference.h" +#include "onnx/inliner/inliner.h" +#include "onnx/py_utils.h" +#include "onnx/shape_inference/implementation.h" +#include "onnx/version_converter/convert.h" + +#if (PYBIND11_VERSION_MAJOR != 2 || PYBIND11_VERSION_MINOR < 12) +#pragma error "Pybind11 must be >= 2.12 to be compatible with numpy 2.0." +#endif + +#ifndef PYBIND11_DETAILED_ERROR_MESSAGES +#define PYBIND11_DETAILED_ERROR_MESSAGES +#endif + +template +struct PythonProtoTypeMap {}; + +#define DEFINE_PROTO_TYPE_MAP(_ProtoType, PY_MODULE_NAME, PY_TYPE_NAME) \ + template <> \ + struct PythonProtoTypeMap<_ProtoType> { \ + static constexpr auto FullName = pybind11::detail::const_name(PY_MODULE_NAME "." 
PY_TYPE_NAME); \ + static constexpr auto TypeName = pybind11::detail::const_name(PY_TYPE_NAME); \ + static constexpr auto ModuleName = pybind11::detail::const_name(PY_MODULE_NAME); \ + }; + +#ifdef ONNX_USE_LITE_PROTO +using BASE_PROTO_TYPE = ::google::protobuf::MessageLite; +#else +using BASE_PROTO_TYPE = ::google::protobuf::Message; +#endif + +template +class pybind11::detail::type_caster<_ProtoType, std::enable_if_t::value>> { + public: + PYBIND11_TYPE_CASTER(_ProtoType, PythonProtoTypeMap<_ProtoType>::FullName); + bool load(handle py_proto, bool) { + try { + if (!pybind11::hasattr(py_proto, "SerializeToString")) { + return false; + } + pybind11::bytes serialized = py_proto.attr("SerializeToString")(); + std::string serialized_str = serialized; + if (!value.ParseFromString(serialized_str)) { + return false; + } + return true; + } catch (const pybind11::error_already_set&) { + return false; + } + } + static handle cast(const _ProtoType& cpp_proto, return_value_policy /* policy */, handle /* parent */) { + auto py_proto = pybind11::module::import(PythonProtoTypeMap<_ProtoType>::ModuleName.text) + .attr(PythonProtoTypeMap<_ProtoType>::TypeName.text)(); + std::string serialized = cpp_proto.SerializeAsString(); + py_proto.attr("ParseFromString")(pybind11::bytes(serialized)); + return py_proto.release(); + } +}; + +DEFINE_PROTO_TYPE_MAP(ONNX_NAMESPACE::AttributeProto, "onnx", "AttributeProto"); +DEFINE_PROTO_TYPE_MAP(ONNX_NAMESPACE::TypeProto, "onnx", "TypeProto"); +DEFINE_PROTO_TYPE_MAP(ONNX_NAMESPACE::TensorProto, "onnx", "TensorProto"); +DEFINE_PROTO_TYPE_MAP(ONNX_NAMESPACE::SparseTensorProto, "onnx", "SparseTensorProto"); +DEFINE_PROTO_TYPE_MAP(ONNX_NAMESPACE::TensorShapeProto, "onnx", "TensorShapeProto"); + +namespace ONNX_NAMESPACE { +namespace py = pybind11; +using namespace pybind11::literals; + +template +static std::tuple Parse(const char* cstr) { + ProtoType proto{}; + OnnxParser parser(cstr); + auto status = parser.Parse(proto); + std::string out; + 
proto.SerializeToString(&out); + return std::make_tuple(status.IsOK(), py::bytes(status.ErrorMessage()), py::bytes(out)); +} + +template +static std::string ProtoBytesToText(const py::bytes& bytes) { + ProtoType proto{}; + ParseProtoFromPyBytes(&proto, bytes); + return ProtoToString(proto); +} + +template > +static std::pair, std::unordered_map> ParseProtoFromBytesMap( + const std::unordered_map& bytesMap) { + std::vector values(bytesMap.size()); + std::unordered_map result; + size_t i = 0; + for (const auto& kv : bytesMap) { + ParseProtoFromPyBytes(&values[i], kv.second); + result[kv.first] = &values[i]; + i++; + } + // C++ guarantees that the pointers remain valid after std::vector is moved. + return std::make_pair(std::move(values), result); +} + +static std::unordered_map CallNodeInferenceFunction( + OpSchema* schema, + const py::bytes& nodeBytes, + const std::unordered_map& valueTypesByNameBytes, + const std::unordered_map& inputDataByNameBytes, + const std::unordered_map& inputSparseDataByNameBytes, + std::unordered_map opsetImports, + const int irVersion) { + NodeProto node{}; + ParseProtoFromPyBytes(&node, nodeBytes); + // Early fail if node is badly defined - may throw ValidationError + schema->Verify(node); + + // Convert arguments to C++ types, allocating memory + const auto& valueTypes = ParseProtoFromBytesMap(valueTypesByNameBytes); + const auto& inputData = ParseProtoFromBytesMap(inputDataByNameBytes); + const auto& inputSparseData = ParseProtoFromBytesMap(inputSparseDataByNameBytes); + if (opsetImports.empty()) { + opsetImports[schema->domain()] = schema->SinceVersion(); + } + + shape_inference::GraphInferenceContext graphInferenceContext( + valueTypes.second, opsetImports, nullptr, {}, OpSchemaRegistry::Instance(), nullptr, irVersion); + // Construct inference context and get results - may throw InferenceError + // TODO: if it is desirable for infer_node_outputs to provide check_type, strict_mode, data_prop, + // we can add them to the Python API. 
For now we just assume the default options. + ShapeInferenceOptions options{false, 0, false}; + shape_inference::InferenceContextImpl ctx( + node, valueTypes.second, inputData.second, inputSparseData.second, options, nullptr, &graphInferenceContext); + schema->GetTypeAndShapeInferenceFunction()(ctx); + // Verify the inference succeeded - may also throw ValidationError + // Note that input types were not validated until now (except that their count was correct) + schema->CheckInputOutputType(ctx); + + // Convert back into bytes returned to Python + std::unordered_map typeProtoBytes; + for (size_t i = 0; i < ctx.allOutputTypes_.size(); i++) { + const auto& proto = ctx.allOutputTypes_[i]; + if (proto.IsInitialized()) { + std::string s; + proto.SerializeToString(&s); + typeProtoBytes[node.output(static_cast(i))] = py::bytes(s); + } + } + + return typeProtoBytes; +} + +template +static std::tuple, std::vector> ConvertPyObjToPtr(const std::vector& pyObjs) { + std::vector objs; + std::vector ptrs; + objs.reserve(pyObjs.size()); + ptrs.reserve(pyObjs.size()); + for (const auto& obj : pyObjs) { + if (obj.is_none()) { + ptrs.push_back(nullptr); + continue; + } + objs.emplace_back(obj.cast()); + ptrs.push_back(&objs.back()); + } + return std::make_tuple(std::move(objs), std::move(ptrs)); +} + +PYBIND11_MODULE(onnx_cpp2py_export, onnx_cpp2py_export) { + onnx_cpp2py_export.doc() = "Python interface to ONNX"; + + onnx_cpp2py_export.attr("ONNX_ML") = py::bool_( +#ifdef ONNX_ML + true +#else // ONNX_ML + false +#endif // ONNX_ML + ); + + // Avoid Segmentation fault if we not free the python function in Custom Schema + onnx_cpp2py_export.add_object("_cleanup", py::capsule([] { OpSchemaRegistry::OpSchemaDeregisterAll(); })); + + // Submodule `schema` + auto defs = onnx_cpp2py_export.def_submodule("defs"); + defs.doc() = "Schema submodule"; + py::register_exception(defs, "SchemaError"); + + py::class_ op_schema(defs, "OpSchema", "Schema of an operator."); + + // Define the class 
enums first because they are used as default values in function definitions + py::enum_(op_schema, "FormalParameterOption") + .value("Single", OpSchema::Single) + .value("Optional", OpSchema::Optional) + .value("Variadic", OpSchema::Variadic); + + py::enum_(op_schema, "DifferentiationCategory") + .value("Unknown", OpSchema::Unknown) + .value("Differentiable", OpSchema::Differentiable) + .value("NonDifferentiable", OpSchema::NonDifferentiable); + + py::enum_(op_schema, "AttrType") + .value("FLOAT", AttributeProto::FLOAT) + .value("INT", AttributeProto::INT) + .value("STRING", AttributeProto::STRING) + .value("TENSOR", AttributeProto::TENSOR) + .value("GRAPH", AttributeProto::GRAPH) + .value("FLOATS", AttributeProto::FLOATS) + .value("INTS", AttributeProto::INTS) + .value("STRINGS", AttributeProto::STRINGS) + .value("TENSORS", AttributeProto::TENSORS) + .value("GRAPHS", AttributeProto::GRAPHS) + .value("SPARSE_TENSOR", AttributeProto::SPARSE_TENSOR) + .value("SPARSE_TENSORS", AttributeProto::SPARSE_TENSORS) + .value("TYPE_PROTO", AttributeProto::TYPE_PROTO) + .value("TYPE_PROTOS", AttributeProto::TYPE_PROTOS); + + py::enum_(op_schema, "SupportType") + .value("COMMON", OpSchema::SupportType::COMMON) + .value("EXPERIMENTAL", OpSchema::SupportType::EXPERIMENTAL); + + py::class_(op_schema, "Attribute") + .def( + py::init([](std::string name, AttributeProto::AttributeType type, std::string description, bool required) { + // Construct an attribute. + // Use a lambda to swap the order of the arguments to match the Python API + return OpSchema::Attribute(std::move(name), std::move(description), type, required); + }), + py::arg("name"), + py::arg("type"), + py::arg("description") = "", + py::kw_only(), + py::arg("required") = true) + .def( + py::init([](std::string name, const py::object& default_value, std::string description) { + // Construct an attribute with a default value. 
+ // Attributes with default values are not required + auto bytes = default_value.attr("SerializeToString")().cast(); + AttributeProto proto{}; + ParseProtoFromPyBytes(&proto, bytes); + return OpSchema::Attribute(std::move(name), std::move(description), std::move(proto)); + }), + py::arg("name"), + py::arg("default_value"), // type: onnx.AttributeProto + py::arg("description") = "") + .def_readonly("name", &OpSchema::Attribute::name) + .def_readonly("description", &OpSchema::Attribute::description) + .def_readonly("type", &OpSchema::Attribute::type) + .def_property_readonly( + "_default_value", + [](OpSchema::Attribute* attr) -> py::bytes { + std::string out; + attr->default_value.SerializeToString(&out); + return out; + }) + .def_readonly("required", &OpSchema::Attribute::required); + + py::class_(op_schema, "TypeConstraintParam") + .def( + py::init, std::string>(), + py::arg("type_param_str"), + py::arg("allowed_type_strs"), + py::arg("description") = "") + .def_readonly("type_param_str", &OpSchema::TypeConstraintParam::type_param_str) + .def_readonly("allowed_type_strs", &OpSchema::TypeConstraintParam::allowed_type_strs) + .def_readonly("description", &OpSchema::TypeConstraintParam::description); + + py::class_(op_schema, "FormalParameter") + .def( + py::init([](std::string name, + std::string type_str, + const std::string& description, + OpSchema::FormalParameterOption param_option, + bool is_homogeneous, + int min_arity, + OpSchema::DifferentiationCategory differentiation_category) { + // Use a lambda to swap the order of the arguments to match the Python API + return OpSchema::FormalParameter( + std::move(name), + description, + std::move(type_str), + param_option, + is_homogeneous, + min_arity, + differentiation_category); + }), + py::arg("name"), + py::arg("type_str"), + py::arg("description") = "", + py::kw_only(), + py::arg("param_option") = OpSchema::Single, + py::arg("is_homogeneous") = true, + py::arg("min_arity") = 1, + 
py::arg("differentiation_category") = OpSchema::DifferentiationCategory::Unknown) + + .def_property_readonly("name", &OpSchema::FormalParameter::GetName) + .def_property_readonly("types", &OpSchema::FormalParameter::GetTypes) + .def_property_readonly("type_str", &OpSchema::FormalParameter::GetTypeStr) + .def_property_readonly("description", &OpSchema::FormalParameter::GetDescription) + .def_property_readonly("option", &OpSchema::FormalParameter::GetOption) + .def_property_readonly("is_homogeneous", &OpSchema::FormalParameter::GetIsHomogeneous) + .def_property_readonly("min_arity", &OpSchema::FormalParameter::GetMinArity) + .def_property_readonly("differentiation_category", &OpSchema::FormalParameter::GetDifferentiationCategory); + + op_schema + .def( + py::init([](std::string name, + std::string domain, + int since_version, + const std::string& doc, + std::vector inputs, + std::vector outputs, + std::vector, std::string>> type_constraints, + std::vector attributes) { + auto self = OpSchema(); + + self.SetName(std::move(name)).SetDomain(std::move(domain)).SinceVersion(since_version).SetDoc(doc); + // Add inputs and outputs + for (auto i = 0; i < inputs.size(); ++i) { + self.Input(i, std::move(inputs[i])); + } + for (auto i = 0; i < outputs.size(); ++i) { + self.Output(i, std::move(outputs[i])); + } + // Add type constraints + for (auto& type_constraint : type_constraints) { + std::string type_str; + std::vector constraints; + std::string description; + tie(type_str, constraints, description) = std::move(type_constraint); + self.TypeConstraint(std::move(type_str), std::move(constraints), std::move(description)); + } + // Add attributes + for (auto& attribute : attributes) { + self.Attr(std::move(attribute)); + } + + self.Finalize(); + + return self; + }), + py::arg("name"), + py::arg("domain"), + py::arg("since_version"), + py::arg("doc") = "", + py::kw_only(), + py::arg("inputs") = std::vector{}, + py::arg("outputs") = std::vector{}, + py::arg("type_constraints") = 
std::vector /* constraints */, + std::string /* description */>>{}, + py::arg("attributes") = std::vector{}) + .def_property("name", &OpSchema::Name, [](OpSchema& self, const std::string& name) { self.SetName(name); }) + .def_property( + "domain", &OpSchema::domain, [](OpSchema& self, const std::string& domain) { self.SetDomain(domain); }) + .def_property("doc", &OpSchema::doc, [](OpSchema& self, const std::string& doc) { self.SetDoc(doc); }) + .def_property_readonly("file", &OpSchema::file) + .def_property_readonly("line", &OpSchema::line) + .def_property_readonly("support_level", &OpSchema::support_level) + .def_property_readonly("since_version", &OpSchema::since_version) + .def_property_readonly("deprecated", &OpSchema::deprecated) + .def_property_readonly("function_opset_versions", &OpSchema::function_opset_versions) + .def_property_readonly( + "context_dependent_function_opset_versions", &OpSchema::context_dependent_function_opset_versions) + .def_property_readonly( + "all_function_opset_versions", + [](OpSchema* op) -> std::vector { + std::vector all_function_opset_versions = op->function_opset_versions(); + std::vector context_dependent_function_opset_versions = + op->context_dependent_function_opset_versions(); + all_function_opset_versions.insert( + all_function_opset_versions.end(), + context_dependent_function_opset_versions.begin(), + context_dependent_function_opset_versions.end()); + std::sort(all_function_opset_versions.begin(), all_function_opset_versions.end()); + all_function_opset_versions.erase( + std::unique(all_function_opset_versions.begin(), all_function_opset_versions.end()), + all_function_opset_versions.end()); + return all_function_opset_versions; + }) + .def_property_readonly("min_input", &OpSchema::min_input) + .def_property_readonly("max_input", &OpSchema::max_input) + .def_property_readonly("min_output", &OpSchema::min_output) + .def_property_readonly("max_output", &OpSchema::max_output) + .def_property_readonly("attributes", 
&OpSchema::attributes) + .def_property_readonly("inputs", &OpSchema::inputs) + .def_property_readonly("outputs", &OpSchema::outputs) + .def_property_readonly("has_type_and_shape_inference_function", &OpSchema::has_type_and_shape_inference_function) + .def_property_readonly("has_data_propagation_function", &OpSchema::has_data_propagation_function) + .def_property_readonly("type_constraints", &OpSchema::typeConstraintParams) + .def_static("is_infinite", [](int v) { return v == std::numeric_limits::max(); }) + .def( + "_infer_node_outputs", + CallNodeInferenceFunction, + py::arg("nodeBytes"), + py::arg("valueTypesByNameBytes"), + py::arg("inputDataByNameBytes") = std::unordered_map{}, + py::arg("inputSparseDataByNameBytes") = std::unordered_map{}, + py::arg("opsetImports") = std::unordered_map{}, + py::arg("irVersion") = int(IR_VERSION)) + .def_property_readonly("has_function", &OpSchema::HasFunction) + .def_property_readonly( + "_function_body", + [](OpSchema* op) -> py::bytes { + std::string bytes = ""; + if (op->HasFunction()) + op->GetFunction()->SerializeToString(&bytes); + return py::bytes(bytes); + }) + .def( + "get_function_with_opset_version", + [](OpSchema* op, int opset_version) -> py::bytes { + std::string bytes = ""; + const FunctionProto* function_proto = op->GetFunction(opset_version); + if (function_proto) { + function_proto->SerializeToString(&bytes); + } + return py::bytes(bytes); + }) + .def_property_readonly("has_context_dependent_function", &OpSchema::HasContextDependentFunction) + .def( + "get_context_dependent_function", + [](OpSchema* op, const py::bytes& bytes, const std::vector& input_types_bytes) -> py::bytes { + NodeProto proto{}; + ParseProtoFromPyBytes(&proto, bytes); + std::string func_bytes = ""; + if (op->HasContextDependentFunction()) { + std::vector input_types; + input_types.reserve(input_types_bytes.size()); + for (auto& type_bytes : input_types_bytes) { + TypeProto type_proto{}; + ParseProtoFromPyBytes(&type_proto, type_bytes); + 
input_types.push_back(type_proto); + } + FunctionBodyBuildContextImpl ctx(proto, input_types); + FunctionProto func_proto; + op->BuildContextDependentFunction(ctx, func_proto); + func_proto.SerializeToString(&func_bytes); + } + return py::bytes(func_bytes); + }) + .def( + "get_context_dependent_function_with_opset_version", + [](OpSchema* op, int opset_version, const py::bytes& bytes, const std::vector& input_types_bytes) + -> py::bytes { + NodeProto proto{}; + ParseProtoFromPyBytes(&proto, bytes); + std::string func_bytes = ""; + if (op->HasContextDependentFunctionWithOpsetVersion(opset_version)) { + std::vector input_types; + input_types.reserve(input_types_bytes.size()); + for (auto& type_bytes : input_types_bytes) { + TypeProto type_proto{}; + ParseProtoFromPyBytes(&type_proto, type_bytes); + input_types.push_back(type_proto); + } + FunctionBodyBuildContextImpl ctx(proto, input_types); + FunctionProto func_proto; + op->BuildContextDependentFunction(ctx, func_proto, opset_version); + func_proto.SerializeToString(&func_bytes); + } + return py::bytes(func_bytes); + }) + .def( + "set_type_and_shape_inference_function", + [](OpSchema& op, const std::function& func) -> OpSchema& { + auto wrapper = [=](InferenceContext& ctx) { func(&ctx); }; + return op.TypeAndShapeInferenceFunction(wrapper); + }, + py::return_value_policy::reference_internal) + .def("get_type_and_shape_inference_function", &OpSchema::GetTypeAndShapeInferenceFunction); + + defs.def( + "has_schema", + [](const std::string& op_type, const std::string& domain) -> bool { + return OpSchemaRegistry::Schema(op_type, domain) != nullptr; + }, + "op_type"_a, + "domain"_a = ONNX_DOMAIN) + .def( + "has_schema", + [](const std::string& op_type, int max_inclusive_version, const std::string& domain) -> bool { + return OpSchemaRegistry::Schema(op_type, max_inclusive_version, domain) != nullptr; + }, + "op_type"_a, + "max_inclusive_version"_a, + "domain"_a = ONNX_DOMAIN) + .def( + "schema_version_map", + []() -> 
std::unordered_map> { + return OpSchemaRegistry::DomainToVersionRange::Instance().Map(); + }) + .def( + "get_schema", + [](const std::string& op_type, const int max_inclusive_version, const std::string& domain) -> OpSchema { + const auto* schema = OpSchemaRegistry::Schema(op_type, max_inclusive_version, domain); + if (!schema) { + fail_schema( + "No schema registered for '" + op_type + "' version '" + std::to_string(max_inclusive_version) + + "' and domain '" + domain + "'!"); + } + return *schema; + }, + "op_type"_a, + "max_inclusive_version"_a, + "domain"_a = ONNX_DOMAIN, + "Return the schema of the operator *op_type* and for a specific version.") + .def( + "get_schema", + [](const std::string& op_type, const std::string& domain) -> OpSchema { + const auto* schema = OpSchemaRegistry::Schema(op_type, domain); + if (!schema) { + fail_schema("No schema registered for '" + op_type + "' and domain '" + domain + "'!"); + } + return *schema; + }, + "op_type"_a, + "domain"_a = ONNX_DOMAIN, + "Return the schema of the operator *op_type* and for a specific version.") + .def( + "get_all_schemas", + []() -> const std::vector { return OpSchemaRegistry::get_all_schemas(); }, + "Return the schema of all existing operators for the latest version.") + .def( + "get_all_schemas_with_history", + []() -> const std::vector { return OpSchemaRegistry::get_all_schemas_with_history(); }, + "Return the schema of all existing operators and all versions.") + .def( + "set_domain_to_version", + [](const std::string& domain, int min_version, int max_version, int last_release_version) { + auto& obj = OpSchemaRegistry::DomainToVersionRange::Instance(); + if (obj.Map().count(domain) == 0) { + obj.AddDomainToVersion(domain, min_version, max_version, last_release_version); + } else { + obj.UpdateDomainToVersion(domain, min_version, max_version, last_release_version); + } + }, + "domain"_a, + "min_version"_a, + "max_version"_a, + "last_release_version"_a = -1, + "Set the version range and last 
release version of the specified domain.") + .def( + "register_schema", + [](OpSchema schema) { RegisterSchema(std::move(schema), 0, true, true); }, + "schema"_a, + "Register a user provided OpSchema.") + .def( + "deregister_schema", + &DeregisterSchema, + "op_type"_a, + "version"_a, + "domain"_a, + "Deregister the specified OpSchema."); + + // Submodule `checker` + auto checker = onnx_cpp2py_export.def_submodule("checker"); + checker.doc() = "Checker submodule"; + + py::class_ checker_context(checker, "CheckerContext"); + checker_context.def(py::init<>()) + .def_property("ir_version", &checker::CheckerContext::get_ir_version, &checker::CheckerContext::set_ir_version) + .def_property( + "opset_imports", &checker::CheckerContext::get_opset_imports, &checker::CheckerContext::set_opset_imports); + + py::class_ lexical_scope_context(checker, "LexicalScopeContext"); + lexical_scope_context.def(py::init<>()); + + py::register_exception(checker, "ValidationError"); + + checker.def("check_value_info", [](const py::bytes& bytes, const checker::CheckerContext& ctx) -> void { + ValueInfoProto proto{}; + ParseProtoFromPyBytes(&proto, bytes); + checker::check_value_info(proto, ctx); + }); + + checker.def("check_tensor", [](const py::bytes& bytes, const checker::CheckerContext& ctx) -> void { + TensorProto proto{}; + ParseProtoFromPyBytes(&proto, bytes); + checker::check_tensor(proto, ctx); + }); + + checker.def("check_sparse_tensor", [](const py::bytes& bytes, const checker::CheckerContext& ctx) -> void { + SparseTensorProto proto{}; + ParseProtoFromPyBytes(&proto, bytes); + checker::check_sparse_tensor(proto, ctx); + }); + + checker.def( + "check_attribute", + [](const py::bytes& bytes, + const checker::CheckerContext& ctx, + const checker::LexicalScopeContext& lex_ctx) -> void { + AttributeProto proto{}; + ParseProtoFromPyBytes(&proto, bytes); + checker::check_attribute(proto, ctx, lex_ctx); + }); + + checker.def( + "check_node", + [](const py::bytes& bytes, + const 
checker::CheckerContext& ctx, + const checker::LexicalScopeContext& lex_ctx) -> void { + NodeProto proto{}; + ParseProtoFromPyBytes(&proto, bytes); + checker::check_node(proto, ctx, lex_ctx); + }); + + checker.def( + "check_function", + [](const py::bytes& bytes, + const checker::CheckerContext& ctx, + const checker::LexicalScopeContext& lex_ctx) -> void { + FunctionProto proto{}; + ParseProtoFromPyBytes(&proto, bytes); + checker::check_function(proto, ctx, lex_ctx); + }); + + checker.def( + "check_graph", + [](const py::bytes& bytes, + const checker::CheckerContext& ctx, + const checker::LexicalScopeContext& lex_ctx) -> void { + GraphProto proto{}; + ParseProtoFromPyBytes(&proto, bytes); + checker::check_graph(proto, ctx, lex_ctx); + }); + + checker.def( + "check_model", + [](const py::bytes& bytes, bool full_check, bool skip_opset_compatibility_check, bool check_custom_domain) + -> void { + ModelProto proto{}; + ParseProtoFromPyBytes(&proto, bytes); + checker::check_model(proto, full_check, skip_opset_compatibility_check, check_custom_domain); + }, + "bytes"_a, + "full_check"_a = false, + "skip_opset_compatibility_check"_a = false, + "check_custom_domain"_a = false); + + checker.def( + "check_model_path", + (void (*)( + const std::string& path, + bool full_check, + bool skip_opset_compatibility_check, + bool check_custom_domain))&checker::check_model, + "path"_a, + "full_check"_a = false, + "skip_opset_compatibility_check"_a = false, + "check_custom_domain"_a = false); + + checker.def("_resolve_external_data_location", &checker::resolve_external_data_location); + + // Submodule `version_converter` + auto version_converter = onnx_cpp2py_export.def_submodule("version_converter"); + version_converter.doc() = "VersionConverter submodule"; + py::register_exception(version_converter, "ConvertError"); + + version_converter.def("convert_version", [](const py::bytes& bytes, const py::int_& target) { + ModelProto proto{}; + ParseProtoFromPyBytes(&proto, bytes); + 
shape_inference::InferShapes(proto); + auto result = version_conversion::ConvertVersion(proto, target); + std::string out; + result.SerializeToString(&out); + return py::bytes(out); + }); + + // Submodule `inliner` + auto inliner = onnx_cpp2py_export.def_submodule("inliner"); + inliner.doc() = "Inliner submodule"; + + inliner.def("inline_local_functions", [](const py::bytes& bytes, bool convert_version) { + ModelProto model{}; + ParseProtoFromPyBytes(&model, bytes); + inliner::InlineLocalFunctions(model, convert_version); + std::string out; + model.SerializeToString(&out); + return py::bytes(out); + }); + + // inline_selected_functions: Inlines all functions specified in function_ids, unless + // exclude is true, in which case it inlines all functions except those specified in + // function_ids. + inliner.def( + "inline_selected_functions", + [](const py::bytes& bytes, std::vector> function_ids, bool exclude) { + ModelProto model{}; + ParseProtoFromPyBytes(&model, bytes); + auto function_id_set = inliner::FunctionIdSet::Create(std::move(function_ids), exclude); + inliner::InlineSelectedLocalFunctions(model, *function_id_set); + std::string out; + model.SerializeToString(&out); + return py::bytes(out); + }); + + inliner.def( + "inline_selected_functions2", + [](const py::bytes& bytes, std::vector> function_ids, bool exclude) { + ModelProto model{}; + ParseProtoFromPyBytes(&model, bytes); + auto function_id_set = inliner::FunctionIdSet::Create(std::move(function_ids), exclude); + inliner::InlineSelectedFunctions(model, *function_id_set, nullptr); + std::string out; + model.SerializeToString(&out); + return py::bytes(out); + }); + + // Submodule `shape_inference` + auto shape_inference = onnx_cpp2py_export.def_submodule("shape_inference"); + shape_inference.doc() = "Shape Inference submodule"; + py::register_exception(shape_inference, "InferenceError"); + + py::class_ inference_context(shape_inference, "InferenceContext", "Inference context"); + + 
inference_context.def("get_attribute", &InferenceContext::getAttribute); + inference_context.def("get_num_inputs", &InferenceContext::getNumInputs); + inference_context.def("get_input_type", &InferenceContext::getInputType); + inference_context.def("has_input", &InferenceContext::hasInput); + inference_context.def("get_input_data", &InferenceContext::getInputData); + inference_context.def("get_num_outputs", &InferenceContext::getNumOutputs); + inference_context.def("get_output_type", &InferenceContext::getOutputType); + inference_context.def("set_output_type", [](InferenceContext& self, size_t idx, const TypeProto& src) { + auto* dst = self.getOutputType(idx); + if (dst == nullptr) { + return false; + } + dst->CopyFrom(src); + return true; + }); + inference_context.def("has_output", &InferenceContext::hasOutput); + inference_context.def( + "get_graph_attribute_inferencer", + &InferenceContext::getGraphAttributeInferencer, + py::return_value_policy::reference_internal); + inference_context.def("get_input_sparse_data", &InferenceContext::getInputSparseData); + inference_context.def("get_symbolic_input", &InferenceContext::getSymbolicInput); + inference_context.def("get_display_name", &InferenceContext::getDisplayName); + + py::class_ graph_inferencer(shape_inference, "GraphInferencer", "Graph Inferencer"); + graph_inferencer.def( + "do_inferencing", + [](GraphInferencer& self, + const std::vector& inputTypesObj, + const std::vector& inputDataObj) { + auto inputTypesTuple = ConvertPyObjToPtr(inputTypesObj); + auto inputDataTuple = ConvertPyObjToPtr(inputDataObj); + auto ret = self.doInferencing(std::get<1>(inputTypesTuple), std::get<1>(inputDataTuple)); + std::vector ret_obj(ret.size()); + for (size_t i = 0; i < ret.size(); ++i) { + ret_obj[i] = py::cast(ret[i]); + } + return ret_obj; + }); + + shape_inference.def( + "infer_shapes", + [](const py::bytes& bytes, bool check_type, bool strict_mode, bool data_prop) { + ModelProto proto{}; + ParseProtoFromPyBytes(&proto, 
bytes); + ShapeInferenceOptions options{check_type, strict_mode ? 1 : 0, data_prop}; + shape_inference::InferShapes(proto, OpSchemaRegistry::Instance(), options); + std::string out; + proto.SerializeToString(&out); + return py::bytes(out); + }, + "bytes"_a, + "check_type"_a = false, + "strict_mode"_a = false, + "data_prop"_a = false); + + shape_inference.def( + "infer_shapes_path", + [](const std::string& model_path, + const std::string& output_path, + bool check_type, + bool strict_mode, + bool data_prop) -> void { + ShapeInferenceOptions options{check_type, strict_mode ? 1 : 0, data_prop}; + shape_inference::InferShapes(model_path, output_path, OpSchemaRegistry::Instance(), options); + }); + + shape_inference.def( + "infer_function_output_types", + [](const py::bytes& function_proto_bytes, + const std::vector& input_types_bytes, + const std::vector& attributes_bytes) -> std::vector { + FunctionProto proto{}; + ParseProtoFromPyBytes(&proto, function_proto_bytes); + + std::vector input_types; + input_types.reserve(input_types_bytes.size()); + for (const py::bytes& bytes : input_types_bytes) { + TypeProto type; + ParseProtoFromPyBytes(&type, bytes); + input_types.push_back(type); + } + + std::vector attributes; + attributes.reserve(attributes_bytes.size()); + for (const py::bytes& bytes : attributes_bytes) { + AttributeProto attr; + ParseProtoFromPyBytes(&attr, bytes); + attributes.push_back(attr); + } + + std::vector output_types = shape_inference::InferFunctionOutputTypes(proto, input_types, attributes); + std::vector result; + result.reserve(output_types.size()); + for (auto& type_proto : output_types) { + std::string out; + type_proto.SerializeToString(&out); + result.emplace_back(out); + } + return result; + }); + + // Submodule `parser` + auto parser = onnx_cpp2py_export.def_submodule("parser"); + parser.doc() = "Parser submodule"; + + parser.def("parse_model", Parse); + parser.def("parse_graph", Parse); + parser.def("parse_function", Parse); + 
parser.def("parse_node", Parse); + + // Submodule `printer` + auto printer = onnx_cpp2py_export.def_submodule("printer"); + printer.doc() = "Printer submodule"; + + printer.def("model_to_text", ProtoBytesToText); + printer.def("function_to_text", ProtoBytesToText); + printer.def("graph_to_text", ProtoBytesToText); +} + +} // namespace ONNX_NAMESPACE diff --git a/pythonProject/.venv/Lib/site-packages/onnx/external_data_helper.py b/pythonProject/.venv/Lib/site-packages/onnx/external_data_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..a23ff15d29b29c826a527fb7d6c9755f70e4a3ca --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/external_data_helper.py @@ -0,0 +1,330 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import os +import re +import sys +import uuid +from itertools import chain +from typing import TYPE_CHECKING, Callable + +import onnx.onnx_cpp2py_export.checker as c_checker +from onnx.onnx_pb import ( + AttributeProto, + FunctionProto, + GraphProto, + ModelProto, + TensorProto, +) + +if TYPE_CHECKING: + from collections.abc import Iterable + + +class ExternalDataInfo: + def __init__(self, tensor: TensorProto) -> None: + self.location = "" + self.offset = None + self.length = None + self.checksum = None + self.basepath = "" + + for entry in tensor.external_data: + setattr(self, entry.key, entry.value) + + if self.offset: + self.offset = int(self.offset) + + if self.length: + self.length = int(self.length) + + +def load_external_data_for_tensor(tensor: TensorProto, base_dir: str) -> None: + """Loads data from an external file for tensor. + Ideally TensorProto should not hold any raw data but if it does it will be ignored. + + Arguments: + tensor: a TensorProto object. + base_dir: directory that contains the external data. 
+ """ + info = ExternalDataInfo(tensor) + external_data_file_path = c_checker._resolve_external_data_location( # type: ignore[attr-defined] + base_dir, info.location, tensor.name + ) + with open(external_data_file_path, "rb") as data_file: + if info.offset: + data_file.seek(info.offset) + + if info.length: + tensor.raw_data = data_file.read(info.length) + else: + tensor.raw_data = data_file.read() + + +def load_external_data_for_model(model: ModelProto, base_dir: str) -> None: + """Loads external tensors into model + + Arguments: + model: ModelProto to load external data to + base_dir: directory that contains external data + """ + for tensor in _get_all_tensors(model): + if uses_external_data(tensor): + load_external_data_for_tensor(tensor, base_dir) + # After loading raw_data from external_data, change the state of tensors + tensor.data_location = TensorProto.DEFAULT + # and remove external data + del tensor.external_data[:] + + +def set_external_data( + tensor: TensorProto, + location: str, + offset: int | None = None, + length: int | None = None, + checksum: str | None = None, + basepath: str | None = None, +) -> None: + if not tensor.HasField("raw_data"): + raise ValueError( + "Tensor " + + tensor.name + + "does not have raw_data field. Cannot set external data for this tensor." + ) + + del tensor.external_data[:] + tensor.data_location = TensorProto.EXTERNAL + for k, v in { + "location": location, + "offset": int(offset) if offset is not None else None, + "length": int(length) if length is not None else None, + "checksum": checksum, + "basepath": basepath, + }.items(): + if v is not None: + entry = tensor.external_data.add() + entry.key = k + entry.value = str(v) + + +def convert_model_to_external_data( + model: ModelProto, + all_tensors_to_one_file: bool = True, + location: str | None = None, + size_threshold: int = 1024, + convert_attribute: bool = False, +) -> None: + """Call to set all tensors with raw data as external data. 
This call should precede 'save_model'. + 'save_model' saves all the tensors data as external data after calling this function. + + Arguments: + model (ModelProto): Model to be converted. + all_tensors_to_one_file (bool): If true, save all tensors to one external file specified by location. + If false, save each tensor to a file named with the tensor name. + location: specify the external file relative to the model that all tensors to save to. + Path is relative to the model path. + If not specified, will use the model name. + size_threshold: Threshold for size of data. Only when tensor's data is >= the size_threshold + it will be converted to external data. To convert every tensor with raw data to external data set size_threshold=0. + convert_attribute (bool): If true, convert all tensors to external data + If false, convert only non-attribute tensors to external data + + Raise: + ValueError: If location is not a relative path. + FileExistsError: If a file already exists in location. + """ + tensors = _get_initializer_tensors(model) + if convert_attribute: + tensors = _get_all_tensors(model) + + if all_tensors_to_one_file: + file_name = str(uuid.uuid1()) + ".data" + if location: + if os.path.isabs(location): + raise ValueError( + "location must be a relative path that is relative to the model path." 
+ ) + if os.path.exists(location): + raise FileExistsError(f"External data file exists in {location}.") + file_name = location + for tensor in tensors: + if ( + tensor.HasField("raw_data") + and sys.getsizeof(tensor.raw_data) >= size_threshold + ): + set_external_data(tensor, file_name) + else: + for tensor in tensors: + if ( + tensor.HasField("raw_data") + and sys.getsizeof(tensor.raw_data) >= size_threshold + ): + tensor_location = tensor.name + if not _is_valid_filename(tensor_location): + tensor_location = str(uuid.uuid1()) + set_external_data(tensor, tensor_location) + + +def convert_model_from_external_data(model: ModelProto) -> None: + """Call to set all tensors which use external data as embedded data. + save_model saves all the tensors data as embedded data after + calling this function. + + Arguments: + model (ModelProto): Model to be converted. + """ + for tensor in _get_all_tensors(model): + if uses_external_data(tensor): + if not tensor.HasField("raw_data"): + raise ValueError("raw_data field doesn't exist.") + del tensor.external_data[:] + tensor.data_location = TensorProto.DEFAULT + + +def save_external_data(tensor: TensorProto, base_path: str) -> None: + """Writes tensor data to an external file according to information in the `external_data` field. 
+ + Arguments: + tensor (TensorProto): Tensor object to be serialized + base_path: System path of a folder where tensor data is to be stored + """ + info = ExternalDataInfo(tensor) + external_data_file_path = os.path.join(base_path, info.location) + + # Retrieve the tensor's data from raw_data or load external file + if not tensor.HasField("raw_data"): + raise ValueError("raw_data field doesn't exist.") + + # Create file if it doesn't exist + if not os.path.isfile(external_data_file_path): + with open(external_data_file_path, "ab"): + pass + + # Open file for reading and writing at random locations ('r+b') + with open(external_data_file_path, "r+b") as data_file: + data_file.seek(0, 2) + if info.offset is not None: + # Pad file to required offset if needed + file_size = data_file.tell() + if info.offset > file_size: + data_file.write(b"\0" * (info.offset - file_size)) + + data_file.seek(info.offset) + offset = data_file.tell() + data_file.write(tensor.raw_data) + set_external_data(tensor, info.location, offset, data_file.tell() - offset) + + +def _get_all_tensors(onnx_model_proto: ModelProto) -> Iterable[TensorProto]: + """Scan an ONNX model for all tensors and return as an iterator.""" + return chain( + _get_initializer_tensors(onnx_model_proto), + _get_attribute_tensors(onnx_model_proto), + ) + + +def _recursive_attribute_processor( + attribute: AttributeProto, func: Callable[[GraphProto], Iterable[TensorProto]] +) -> Iterable[TensorProto]: + """Create an iterator through processing ONNX model attributes with functor.""" + if attribute.type == AttributeProto.GRAPH: + yield from func(attribute.g) + if attribute.type == AttributeProto.GRAPHS: + for graph in attribute.graphs: + yield from func(graph) + + +def _get_initializer_tensors_from_graph( + graph_or_function: GraphProto | FunctionProto, / +) -> Iterable[TensorProto]: + """Create an iterator of initializer tensors from ONNX model graph/function.""" + if isinstance(graph_or_function, GraphProto): + yield from 
graph_or_function.initializer + for node in graph_or_function.node: + for attribute in node.attribute: + yield from _recursive_attribute_processor( + attribute, _get_initializer_tensors_from_graph + ) + + +def _get_initializer_tensors(onnx_model_proto: ModelProto) -> Iterable[TensorProto]: + """Create an iterator of initializer tensors from ONNX model.""" + yield from _get_initializer_tensors_from_graph(onnx_model_proto.graph) + for function in onnx_model_proto.functions: + yield from _get_attribute_tensors_from_graph(function) + + +def _get_attribute_tensors_from_graph( + graph_or_function: GraphProto | FunctionProto, / +) -> Iterable[TensorProto]: + """Create an iterator of tensors from node attributes of an ONNX model graph/function.""" + for node in graph_or_function.node: + for attribute in node.attribute: + if attribute.HasField("t"): + yield attribute.t + yield from attribute.tensors + yield from _recursive_attribute_processor( + attribute, _get_attribute_tensors_from_graph + ) + + +def _get_attribute_tensors(onnx_model_proto: ModelProto) -> Iterable[TensorProto]: + """Create an iterator of tensors from node attributes of an ONNX model.""" + yield from _get_attribute_tensors_from_graph(onnx_model_proto.graph) + for function in onnx_model_proto.functions: + yield from _get_attribute_tensors_from_graph(function) + + +def _is_valid_filename(filename: str) -> bool: + """Utility to check whether the provided filename is valid.""" + exp = re.compile('^[^<>:;,?"*|/]+$') + match = exp.match(filename) + return bool(match) + + +def uses_external_data(tensor: TensorProto) -> bool: + """Returns true if the tensor stores data in an external location.""" + return ( + tensor.HasField("data_location") + and tensor.data_location == TensorProto.EXTERNAL + ) + + +def remove_external_data_field(tensor: TensorProto, field_key: str) -> None: + """Removes a field from a Tensor's external_data key-value store. + + Modifies tensor object in place. 
+ + Arguments: + tensor (TensorProto): Tensor object from which value will be removed + field_key (string): The key of the field to be removed + """ + for i, field in enumerate(tensor.external_data): + if field.key == field_key: + del tensor.external_data[i] + + +def write_external_data_tensors(model: ModelProto, filepath: str) -> ModelProto: + """Serializes data for all the tensors which have data location set to TensorProto.External. + + Note: This function also strips basepath information from all tensors' external_data fields. + + Arguments: + model (ModelProto): Model object which is the source of tensors to serialize. + filepath: System path to the directory which should be treated as base path for external data. + + Returns: + ModelProto: The modified model object. + """ + for tensor in _get_all_tensors(model): + # Writing to external data happens in 2 passes: + # 1. Tensors with raw data which pass the necessary conditions (size threshold etc) are marked for serialization + # 2. The raw data in these tensors is serialized to a file + # Thus serialize only if tensor has raw data and it was marked for serialization + if uses_external_data(tensor) and tensor.HasField("raw_data"): + save_external_data(tensor, filepath) + tensor.ClearField("raw_data") + + return model diff --git a/pythonProject/.venv/Lib/site-packages/onnx/gen_proto.py b/pythonProject/.venv/Lib/site-packages/onnx/gen_proto.py new file mode 100644 index 0000000000000000000000000000000000000000..2b8b3d98ab6537e173407ffa2d7c68c3d2d617e7 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/gen_proto.py @@ -0,0 +1,270 @@ +#!/usr/bin/env python + +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import argparse +import glob +import os +import re +import subprocess +from textwrap import dedent +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from collections.abc import Iterable + +autogen_header = """\ +// +// WARNING: This file is automatically generated! 
Please edit onnx.in.proto. +// + + +""" + +LITE_OPTION = """ + +// For using protobuf-lite +option optimize_for = LITE_RUNTIME; + +""" + +DEFAULT_PACKAGE_NAME = "onnx" + +IF_ONNX_ML_REGEX = re.compile(r"\s*//\s*#if\s+ONNX-ML\s*$") +ENDIF_ONNX_ML_REGEX = re.compile(r"\s*//\s*#endif\s*$") +ELSE_ONNX_ML_REGEX = re.compile(r"\s*//\s*#else\s*$") + + +def process_ifs(lines: Iterable[str], onnx_ml: bool) -> Iterable[str]: + in_if = 0 + for line in lines: + if IF_ONNX_ML_REGEX.match(line): + assert in_if == 0 + in_if = 1 + elif ELSE_ONNX_ML_REGEX.match(line): + assert in_if == 1 + in_if = 2 + elif ENDIF_ONNX_ML_REGEX.match(line): + assert in_if in (1, 2) + in_if = 0 + else: # noqa: PLR5501 + if in_if == 0: + yield line + elif in_if == 1 and onnx_ml: + yield line + elif in_if == 2 and not onnx_ml: # noqa: PLR2004 + yield line + + +IMPORT_REGEX = re.compile(r'(\s*)import\s*"([^"]*)\.proto";\s*$') +PACKAGE_NAME_REGEX = re.compile(r"\{PACKAGE_NAME\}") +ML_REGEX = re.compile(r"(.*)\-ml") + + +def process_package_name(lines: Iterable[str], package_name: str) -> Iterable[str]: + need_rename = package_name != DEFAULT_PACKAGE_NAME + for line in lines: + m = IMPORT_REGEX.match(line) if need_rename else None + if m: + include_name = m.group(2) + ml = ML_REGEX.match(include_name) + if ml: + include_name = f"{ml.group(1)}_{package_name}-ml" + else: + include_name = f"{include_name}_{package_name}" + yield m.group(1) + f'import "{include_name}.proto";' + else: + yield PACKAGE_NAME_REGEX.sub(package_name, line) + + +PROTO_SYNTAX_REGEX = re.compile(r'(\s*)syntax\s*=\s*"proto2"\s*;\s*$') +OPTIONAL_REGEX = re.compile(r"(\s*)optional\s(.*)$") + + +def convert_to_proto3(lines: Iterable[str]) -> Iterable[str]: + for line in lines: + # Set the syntax specifier + m = PROTO_SYNTAX_REGEX.match(line) + if m: + yield m.group(1) + 'syntax = "proto3";' + continue + + # Remove optional keywords + m = OPTIONAL_REGEX.match(line) + if m: + yield m.group(1) + m.group(2) + continue + + # Rewrite import + m 
= IMPORT_REGEX.match(line) + if m: + yield m.group(1) + f'import "{m.group(2)}.proto3";' + continue + + yield line + + +def gen_proto3_code( + protoc_path: str, proto3_path: str, include_path: str, cpp_out: str, python_out: str +) -> None: + print(f"Generate pb3 code using {protoc_path}") + build_args = [protoc_path, proto3_path, "-I", include_path] + build_args.extend(["--cpp_out", cpp_out, "--python_out", python_out]) + subprocess.check_call(build_args) + + +def translate(source: str, proto: int, onnx_ml: bool, package_name: str) -> str: + lines: Iterable[str] = source.splitlines() + lines = process_ifs(lines, onnx_ml=onnx_ml) + lines = process_package_name(lines, package_name=package_name) + if proto == 3: # noqa: PLR2004 + lines = convert_to_proto3(lines) + else: + assert proto == 2 # noqa: PLR2004 + return os.linesep.join(lines) + + +def qualify(f: str, pardir: str | None = None) -> str: + if pardir is None: + pardir = os.path.realpath(os.path.dirname(__file__)) + return os.path.join(pardir, f) + + +def convert( + stem: str, + package_name: str, + output: str, + do_onnx_ml: bool = False, + lite: bool = False, + protoc_path: str = "", +) -> None: + proto_in = qualify(f"{stem}.in.proto") + need_rename = package_name != DEFAULT_PACKAGE_NAME + # Having a separate variable for import_ml ensures that the import statements for the generated + # proto files can be set separately from the ONNX_ML environment variable setting. + import_ml = do_onnx_ml + # We do not want to generate the onnx-data-ml.proto files for onnx-data.in.proto, + # as there is no change between onnx-data.proto and the ML version. 
+ if "onnx-data" in proto_in: + do_onnx_ml = False + if do_onnx_ml: + proto_base = f"{stem}_{package_name}-ml" if need_rename else f"{stem}-ml" + else: + proto_base = f"{stem}_{package_name}" if need_rename else f"{stem}" + proto = qualify(f"{proto_base}.proto", pardir=output) + proto3 = qualify(f"{proto_base}.proto3", pardir=output) + + print(f"Processing {proto_in}") + with open(proto_in, encoding="utf-8") as fin: + source = fin.read() + print(f"Writing {proto}") + with open(proto, "w", newline="", encoding="utf-8") as fout: + fout.write(autogen_header) + fout.write( + translate(source, proto=2, onnx_ml=import_ml, package_name=package_name) + ) + if lite: + fout.write(LITE_OPTION) + print(f"Writing {proto3}") + with open(proto3, "w", newline="", encoding="utf-8") as fout: + fout.write(autogen_header) + fout.write( + translate(source, proto=3, onnx_ml=import_ml, package_name=package_name) + ) + if lite: + fout.write(LITE_OPTION) + if protoc_path: + porto3_dir = os.path.dirname(proto3) + base_dir = os.path.dirname(porto3_dir) + gen_proto3_code(protoc_path, proto3, base_dir, base_dir, base_dir) + pb3_files = glob.glob(os.path.join(porto3_dir, f"{proto_base}.proto3.*")) + for pb3_file in pb3_files: + print(f"Removing {pb3_file}") + os.remove(pb3_file) + + if need_rename: + if do_onnx_ml: + proto_header = qualify(f"{stem}-ml.pb.h", pardir=output) + else: + proto_header = qualify(f"{stem}.pb.h", pardir=output) + print(f"Writing {proto_header}") + with open(proto_header, "w", newline="", encoding="utf-8") as fout: + fout.write("#pragma once\n") + fout.write(f'#include "{proto_base}.pb.h"\n') + + # Generate py mapping + # "-" is invalid in python module name, replaces '-' with '_' + pb_py = qualify(f"{stem.replace('-', '_')}_pb.py", pardir=output) + if need_rename: + pb2_py = qualify(f"{proto_base.replace('-', '_')}_pb2.py", pardir=output) + else: # noqa: PLR5501 + if do_onnx_ml: + pb2_py = qualify(f"{stem.replace('-', '_')}_ml_pb2.py", pardir=output) + else: + pb2_py = 
qualify(f"{stem.replace('-', '_')}_pb2.py", pardir=output) + + print(f"generating {pb_py}") + with open(pb_py, "w", encoding="utf-8") as f: + f.write( + dedent( + f"""\ + # This file is generated by setup.py. DO NOT EDIT! + + + from .{os.path.splitext(os.path.basename(pb2_py))[0]} import * # noqa + """ + ) + ) + + +def main() -> None: + parser = argparse.ArgumentParser( + description="Generates .proto file variations from .in.proto" + ) + parser.add_argument( + "-p", + "--package", + default="onnx", + help="package name in the generated proto files (default: %(default)s)", + ) + parser.add_argument("-m", "--ml", action="store_true", help="ML mode") + parser.add_argument( + "-l", + "--lite", + action="store_true", + help="generate lite proto to use with protobuf-lite", + ) + parser.add_argument( + "-o", + "--output", + default=os.path.realpath(os.path.dirname(__file__)), + help="output directory (default: %(default)s)", + ) + parser.add_argument( + "--protoc_path", default="", help="path to protoc for proto3 file validation" + ) + parser.add_argument( + "stems", + nargs="*", + default=["onnx", "onnx-operators", "onnx-data"], + help="list of .in.proto file stems (default: %(default)s)", + ) + args = parser.parse_args() + + if not os.path.exists(args.output): + os.makedirs(args.output) + + for stem in args.stems: + convert( + stem, + package_name=args.package, + output=args.output, + do_onnx_ml=args.ml, + lite=args.lite, + protoc_path=args.protoc_path, + ) + + +if __name__ == "__main__": + main() diff --git a/pythonProject/.venv/Lib/site-packages/onnx/helper.py b/pythonProject/.venv/Lib/site-packages/onnx/helper.py new file mode 100644 index 0000000000000000000000000000000000000000..9cfc5307e133aa84b4289e987e79f2d10c85bd9f --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/helper.py @@ -0,0 +1,1686 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import collections.abc +import 
functools +import math +import numbers +import struct +import typing +from cmath import isnan +from typing import TYPE_CHECKING, Any, Callable, TypeVar, Union + +import google.protobuf.message +import numpy as np +import numpy.typing as npt +import typing_extensions + +import onnx +from onnx import _mapping, defs, subbyte +from onnx.onnx_data_pb import MapProto, OptionalProto, SequenceProto +from onnx.onnx_pb import ( + AttributeProto, + FunctionProto, + GraphProto, + ModelProto, + NodeProto, + OperatorSetIdProto, + TensorProto, + TensorShapeProto, + TrainingInfoProto, + TypeProto, + ValueInfoProto, +) + +if TYPE_CHECKING: + from collections.abc import KeysView, Sequence + + from google.protobuf.internal.containers import RepeatedCompositeFieldContainer + +VersionRowType = Union[tuple[str, int, int, int], tuple[str, int, int, int, int]] +VersionTableType = list[VersionRowType] +AssignmentBindingType = list[tuple[str, str]] + +# This is a copy of the documented version in https://github.com/onnx/onnx/blob/main/docs/Versioning.md#released-versions +# Both must be updated whenever a new version of ONNX is released. 
+VERSION_TABLE: VersionTableType = [ + # Release-version, IR version, ai.onnx version, ai.onnx.ml version, (optional) ai.onnx.training version + ("1.0", 3, 1, 1), + ("1.1", 3, 5, 1), + ("1.1.2", 3, 6, 1), + ("1.2", 3, 7, 1), + ("1.3", 3, 8, 1), + ("1.4.1", 4, 9, 1), + ("1.5.0", 5, 10, 1), + ("1.6.0", 6, 11, 2), + ("1.7.0", 7, 12, 2, 1), + ("1.8.0", 7, 13, 2, 1), + ("1.8.1", 7, 13, 2, 1), + ("1.9.0", 7, 14, 2, 1), + ("1.10.0", 8, 15, 2, 1), + ("1.10.1", 8, 15, 2, 1), + ("1.10.2", 8, 15, 2, 1), + ("1.11.0", 8, 16, 3, 1), + ("1.12.0", 8, 17, 3, 1), + ("1.13.0", 8, 18, 3, 1), + ("1.13.1", 8, 18, 3, 1), + ("1.14.0", 9, 19, 3, 1), + ("1.14.1", 9, 19, 3, 1), + ("1.15.0", 9, 20, 4, 1), + ("1.16.0", 10, 21, 5, 1), + ("1.17.0", 10, 22, 5, 1), + ("1.18.0", 11, 23, 5, 1), + ("1.19.0", 12, 24, 5, 1), +] + +VersionMapType = dict[tuple[str, int], int] + + +def _create_op_set_id_version_map(table: VersionTableType) -> VersionMapType: + """Create a map from (opset-domain, opset-version) to ir-version from above table.""" + result: VersionMapType = {} + + def process(release_version: str, ir_version: int, *args: Any) -> None: + del release_version # Unused + for pair in zip(["ai.onnx", "ai.onnx.ml", "ai.onnx.training"], args): + if pair not in result: + result[pair] = ir_version + if pair[0] == "ai.onnx.training": + result["ai.onnx.preview.training", pair[1]] = ir_version + + for row in table: + process(*row) + return result + + +OP_SET_ID_VERSION_MAP = _create_op_set_id_version_map(VERSION_TABLE) + + +def find_min_ir_version_for( + opsetidlist: Sequence[OperatorSetIdProto], ignore_unknown: bool = False +) -> int: + """Given list of opset ids, determine minimum IR version required. + + Args: + opsetidlist: A sequence of OperatorSetIdProto. + ignore_unknown: If True, ignore unknown domain and return default minimum + version for that domain. 
+ + Returns: + The minimum IR version required (integer) + """ + default_min_version = 3 + + def find_min(domain: str | None, version: int) -> int: + key = (domain or "ai.onnx", version) + if key in OP_SET_ID_VERSION_MAP: + return OP_SET_ID_VERSION_MAP[key] + if ignore_unknown: + return default_min_version + raise ValueError("Unsupported opset-version.") + + if opsetidlist: + return max(find_min(x.domain, x.version) for x in opsetidlist) + return default_min_version # if no opsets specified + + +def make_node( + op_type: str, + inputs: Sequence[str], + outputs: Sequence[str], + name: str | None = None, + doc_string: str | None = None, + domain: str | None = None, + overload: str | None = None, + **kwargs: Any, +) -> NodeProto: + """Construct a NodeProto. + + Args: + op_type (string): The name of the operator to construct + inputs (list of string): list of input names + outputs (list of string): list of output names + name (string, default None): optional unique identifier for NodeProto + doc_string (string, default None): optional documentation string for NodeProto + domain (string, default None): optional domain for NodeProto. + If it's None, we will just use default domain (which is empty) + overload (string, default None): optional field, used to + resolve calls to model-local functions + **kwargs (dict): the attributes of the node. The acceptable values + are documented in :func:`make_attribute`. 
+ + Returns: + NodeProto + """ + node = NodeProto() + node.op_type = op_type + node.input.extend(inputs) + node.output.extend(outputs) + if name: + node.name = name + if doc_string: + node.doc_string = doc_string + if domain is not None: + node.domain = domain + if overload is not None: + node.overload = overload + if kwargs: + node.attribute.extend( + make_attribute(key, value) + for key, value in sorted(kwargs.items()) + if value is not None + ) + return node + + +def make_operatorsetid( + domain: str, + version: int, +) -> OperatorSetIdProto: + """Construct an OperatorSetIdProto. + + Args: + domain (string): The domain of the operator set id + version (integer): Version of operator set id + Returns: + OperatorSetIdProto + """ + operatorsetid = OperatorSetIdProto() + operatorsetid.domain = domain + operatorsetid.version = version + return operatorsetid + + +def make_graph( + nodes: Sequence[NodeProto], + name: str, + inputs: Sequence[ValueInfoProto], + outputs: Sequence[ValueInfoProto], + initializer: Sequence[TensorProto] | None = None, + doc_string: str | None = None, + value_info: Sequence[ValueInfoProto] | None = None, + sparse_initializer: Sequence[onnx.SparseTensorProto] | None = None, +) -> GraphProto: + """Construct a GraphProto + + Args: + nodes: list of NodeProto + name (string): graph name + inputs: list of ValueInfoProto + outputs: list of ValueInfoProto + initializer: list of TensorProto + doc_string (string): graph documentation + value_info: list of ValueInfoProto + sparse_initializer: list of onnx.SparseTensorProto + Returns: + GraphProto + """ + if initializer is None: + initializer = [] + if sparse_initializer is None: + sparse_initializer = [] + if value_info is None: + value_info = [] + graph = GraphProto() + graph.node.extend(nodes) + graph.name = name + graph.input.extend(inputs) + graph.output.extend(outputs) + graph.initializer.extend(initializer) + graph.sparse_initializer.extend(sparse_initializer) + graph.value_info.extend(value_info) + 
if doc_string: + graph.doc_string = doc_string + return graph + + +def make_opsetid(domain: str, version: int) -> OperatorSetIdProto: + """Construct an OperatorSetIdProto. + + Args: + domain (string): The domain of the operator set id + version (integer): Version of operator set id + Returns: + OperatorSetIdProto + """ + opsetid = OperatorSetIdProto() + opsetid.domain = domain + opsetid.version = version + return opsetid + + +def make_function( + domain: str, + fname: str, + inputs: Sequence[str], + outputs: Sequence[str], + nodes: Sequence[NodeProto], + opset_imports: Sequence[OperatorSetIdProto], + attributes: Sequence[str] | None = None, + attribute_protos: Sequence[AttributeProto] | None = None, + doc_string: str | None = None, + overload: str | None = None, + value_info: Sequence[ValueInfoProto] | None = None, +) -> FunctionProto: + if attributes is None: + attributes = [] + if attribute_protos is None: + attribute_protos = [] + if value_info is None: + value_info = [] + f = FunctionProto() + f.domain = domain + f.name = fname + f.input.extend(inputs) + f.output.extend(outputs) + f.node.extend(nodes) + f.opset_import.extend(opset_imports) + f.attribute.extend(attributes) + f.attribute_proto.extend(attribute_protos) + if doc_string: + f.doc_string = doc_string + if overload is not None: + f.overload = overload + f.value_info.extend(value_info) + return f + + +def make_model(graph: GraphProto, **kwargs: Any) -> ModelProto: + """Construct a ModelProto + + Args: + graph (GraphProto): *make_graph* returns + **kwargs: any attribute to add to the returned instance + Returns: + ModelProto + """ + model = ModelProto() + # Touch model.ir_version so it is stored as the version from which it is + # generated. 
+ model.ir_version = onnx.IR_VERSION + model.graph.CopyFrom(graph) + + opset_imports: Sequence[OperatorSetIdProto] | None = kwargs.pop( + "opset_imports", None + ) + if opset_imports is not None: + model.opset_import.extend(opset_imports) + else: + # Default import + imp = model.opset_import.add() + imp.version = defs.onnx_opset_version() + + functions: Sequence[FunctionProto] | None = kwargs.pop("functions", None) + if functions is not None: + model.functions.extend(functions) + + for k, v in kwargs.items(): + # TODO: Does this work with repeated fields? + setattr(model, k, v) + return model + + +# An extension of make_model that infers an IR_VERSION for the model, +# if not specified, using a best-effort-basis. +def make_model_gen_version(graph: GraphProto, **kwargs: Any) -> ModelProto: + ir_version_field = "ir_version" + if ir_version_field not in kwargs: + opset_imports_field = "opset_imports" + imports = kwargs.get(opset_imports_field, []) + kwargs[ir_version_field] = find_min_ir_version_for(imports) + return make_model(graph, **kwargs) + + +def set_metadata_props( + proto: ( + ModelProto + | GraphProto + | FunctionProto + | NodeProto + | TensorProto + | ValueInfoProto + ), + dict_value: dict[str, str], +) -> None: + del proto.metadata_props[:] + for k, v in dict_value.items(): + entry = proto.metadata_props.add() + entry.key = k + entry.value = v + + +def set_model_props(model: ModelProto, dict_value: dict[str, str]) -> None: + set_metadata_props(model, dict_value) + + +@typing_extensions.deprecated( + "Deprecated since 1.18. Scheduled to remove in 1.20. Consider using libraries like ml_dtypes for dtype conversion" +) +def float32_to_bfloat16(fval: float, truncate: bool = False) -> int: + # convert a float32 value to a bfloat16 (as int) + # By default, this conversion rounds-to-nearest-even and supports NaN + # Setting `truncate` to True enables a simpler conversion. 
In this mode the + # conversion is performed by simply dropping the 2 least significant bytes of + # the significand. In this mode an error of up to 1 bit may be introduced and + # preservation of NaN values is not be guaranteed. + ival = int.from_bytes(struct.pack("> 16 + # NaN requires at least 1 significand bit set + if isnan(fval): + return 0x7FC0 # sign=0, exp=all-ones, sig=0b1000000 + # drop bottom 16-bits + # round remaining bits using round-to-nearest-even + rounded = ((ival >> 16) & 1) + 0x7FFF + return (ival + rounded) >> 16 + + +@typing_extensions.deprecated( + "Deprecated since 1.18. Scheduled to remove in 1.20. Consider using libraries like ml_dtypes for dtype conversion" +) +def float32_to_float8e4m3( # noqa: PLR0911 + fval: float, + scale: float = 1.0, + fn: bool = True, + uz: bool = False, + saturate: bool = True, +) -> int: + """Convert a float32 value to a float8, e4m3 (as int). + + See :ref:`onnx-detail-float8` for technical details. + + Args: + fval: float to convert + scale: scale, divide *fval* by *scale* before casting it + fn: no infinite values + uz: no negative zero + saturate: if True, any value out of range included inf becomes + the maximum value, otherwise, it becomes NaN. The + description of operator Cast fully describes the + differences. + + Returns: + converted float + """ + if not fn: + raise NotImplementedError( + "float32_to_float8e4m3 not implemented with fn=False." 
+ ) + x = fval / scale + b = int.from_bytes(struct.pack("> 24 # sign + if uz: + if (b & 0x7FC00000) == 0x7FC00000: # noqa: PLR2004 + return 0x80 + if np.isinf(x): + if saturate: + return ret | 127 + return 0x80 + e = (b & 0x7F800000) >> 23 # exponent + m = b & 0x007FFFFF # mantissa + + if e < 116: # noqa: PLR2004 + ret = 0 + elif e < 120: # noqa: PLR2004 + # denormalized number + ex = e - 119 + if ex >= -2: # noqa: PLR2004 + ret |= 1 << (2 + ex) + ret |= m >> (21 - ex) + elif m > 0: + ret |= 1 + else: + ret = 0 + mask = 1 << (20 - ex) + if m & mask and ( + ret & 1 + or m & (mask - 1) > 0 + or (m & mask and m & (mask << 1) and m & (mask - 1) == 0) + ): + # rounding + ret += 1 + elif e < 135: # noqa: PLR2004 + # normalized number + ex = e - 119 # 127 - 8 + if ex == 0: + ret |= 0x4 + ret |= m >> 21 + else: + ret |= ex << 3 + ret |= m >> 20 + if m & 0x80000 and ((m & 0x100000) or (m & 0x7FFFF)): + if (ret & 0x7F) < 0x7F: # noqa: PLR2004 + # rounding + ret += 1 + elif not saturate: + return 0x80 + elif saturate: + ret |= 0x7F # 01111110 + else: + ret = 0x80 + return int(ret) + else: + if (b & 0x7FC00000) == 0x7FC00000: # noqa: PLR2004 + return 0x7F | ret + if np.isinf(x): + if saturate: + return ret | 126 + return 0x7F | ret + e = (b & 0x7F800000) >> 23 # exponent + m = b & 0x007FFFFF # mantissa + + if e != 0: + if e < 117: # noqa: PLR2004 + pass + elif e < 121: # noqa: PLR2004 + # denormalized number + ex = e - 120 + if ex >= -2: # noqa: PLR2004 + ret |= 1 << (2 + ex) + ret |= m >> (21 - ex) + elif m > 0: + ret |= 1 + mask = 1 << (20 - ex) + if m & mask and ( + ret & 1 + or m & (mask - 1) > 0 + or (m & mask and m & (mask << 1) and m & (mask - 1) == 0) + ): + # rounding + ret += 1 + elif e < 136: # noqa: PLR2004 + # normalized number + ex = e - 120 + if ex == 0: + ret |= 0x4 + ret |= m >> 21 + else: + ret |= ex << 3 + ret |= m >> 20 + if (ret & 0x7F) == 0x7F: # noqa: PLR2004 + ret &= 0xFE + if (m & 0x80000) and ((m & 0x100000) or (m & 0x7FFFF)): + if (ret & 0x7F) < 
0x7E: # noqa: PLR2004 + # rounding + ret += 1 + elif not saturate: + ret |= 0x7F + elif saturate: + ret |= 126 # 01111110 + else: + ret |= 0x7F + return int(ret) + + +@typing_extensions.deprecated( + "Deprecated since 1.18. Scheduled to remove in 1.20. Consider using libraries like ml_dtypes for dtype conversion" +) +def float32_to_float8e5m2( # noqa: PLR0911 + fval: float, + scale: float = 1.0, + fn: bool = False, + uz: bool = False, + saturate: bool = True, +) -> int: + """Convert a float32 value to a float8, e5m2 (as int). + + Args: + fval: float to convert + scale: scale, divide *fval* by *scale* before casting it + fn: no infinite values + uz: no negative zero + saturate: if True, any value out of range included inf becomes + the maximum value, otherwise, it becomes NaN. The + description of operator Cast fully describes the + differences. + + Returns: + converted float + """ + x = fval / scale + b = int.from_bytes(struct.pack("> 24 # sign + + if fn and uz: + if (b & 0x7FC00000) == 0x7FC00000: # noqa: PLR2004 + return 0x80 + if (b & 0x7FFFFFFF) == 0x7F800000: # noqa: PLR2004 + # inf + if saturate: + return ret | 0x7F + return 0x80 + e = (b & 0x7F800000) >> 23 # exponent + m = b & 0x007FFFFF # mantissa + + if e < 109: # noqa: PLR2004 + ret = 0 + elif e < 112: # noqa: PLR2004 + # denormalized number + ex = e - 111 + if ex >= -1: + ret |= 1 << (1 + ex) + ret |= m >> (22 - ex) + elif m > 0: + ret |= 1 + else: + ret = 0 + mask = 1 << (21 - ex) + if m & mask and ( + ret & 1 + or m & (mask - 1) > 0 + or (m & mask and m & (mask << 1) and m & (mask - 1) == 0) + ): + # rounding + ret += 1 + elif e < 143: # noqa: PLR2004 + # normalized number + ex = e - 111 + ret |= ex << 2 + ret |= m >> 21 + if m & 0x100000 and ((m & 0xFFFFF) or (m & 0x200000)): + if (ret & 0x7F) < 0x7F: # noqa: PLR2004 + # rounding + ret += 1 + elif not saturate: + ret = 0x80 + elif e == 255 and m == 0: # inf # noqa: PLR2004 + ret = 0x80 + elif saturate: + ret |= 0x7F # last possible number + else: + 
ret = 0x80 + return int(ret) + elif not fn and not uz: + if (b & 0x7FC00000) == 0x7FC00000: # noqa: PLR2004 + return 0x7F | ret + if np.isinf(x): + if saturate: + return 0x7B | ret + return 0x7C | ret + e = (b & 0x7F800000) >> 23 # exponent + m = b & 0x007FFFFF # mantissa + + if e != 0: + if e < 110: # noqa: PLR2004 + pass + elif e < 113: # noqa: PLR2004 + # denormalized number + ex = e - 112 + if ex >= -1: + ret |= 1 << (1 + ex) + ret |= m >> (22 - ex) + elif m > 0: + ret |= 1 + mask = 1 << (21 - ex) + if m & mask and ( + ret & 1 + or m & (mask - 1) > 0 + or (m & mask and m & (mask << 1) and m & (mask - 1) == 0) + ): + # rounding + ret += 1 + elif e < 143: # noqa: PLR2004 + # normalized number + ex = e - 112 + ret |= ex << 2 + ret |= m >> 21 + if m & 0x100000 and ((m & 0xFFFFF) or (m & 0x200000)): + if (ret & 0x7F) < 0x7B: # noqa: PLR2004 + # rounding + ret += 1 + elif saturate: + ret |= 0x7B + else: + ret |= 0x7C + elif saturate: + ret |= 0x7B + else: + ret |= 0x7C + return int(ret) + else: + raise NotImplementedError("fn and uz must be both False or True.") + + +@typing_extensions.deprecated( + "Deprecated since 1.18. Scheduled to remove in 1.20. Consider using libraries like ml_dtypes for dtype conversion" +) +def pack_float32_to_4bit(array: np.ndarray | Sequence, signed: bool) -> np.ndarray: + """Convert an array of float32 value to a 4bit data-type and pack every two concecutive elements in a byte. + See :ref:`onnx-detail-int4` for technical details. + + Args: + array: array of float to convert and pack + signed: Whether the 4 bit variant is signed or unsigned + + Returns: + Packed array with size `ceil(farray.size/2)` (single dimension). 
+ """ + if not isinstance(array, np.ndarray): + array = np.asarray(array, dtype=np.float32) + + array_flat: np.ndarray = array.ravel() + is_odd_volume = np.prod(array.shape) % 2 == 1 + if is_odd_volume: + array_flat = np.append(array_flat, np.array([0])) + + def single_func(x, y) -> np.ndarray: + return subbyte.float32x2_to_4bitx2(x, y, signed) + + func = np.frompyfunc(single_func, 2, 1) + + arr: np.ndarray = func(array_flat[0::2], array_flat[1::2]) + return arr.astype(np.uint8) + + +@typing_extensions.deprecated( + "Deprecated since 1.18. Scheduled to remove in 1.20. Consider using libraries like ml_dtypes for dtype conversion" +) +def pack_float32_to_float4e2m1(array: np.ndarray | Sequence) -> np.ndarray: + """Convert an array of float32 value to float4e2m1 and pack every two concecutive elements in a byte. + See :ref:`onnx-detail-float4` for technical details. + + Args: + array: array of float to convert and pack + + Returns: + Packed array of float4e2m1 (as uint8) with size `ceil(farray.size/2)` (single dimension). + """ + if not isinstance(array, np.ndarray): + array = np.asarray(array, dtype=np.float32) + + array_flat: np.ndarray = array.ravel() + is_odd_volume = np.prod(array.shape) % 2 == 1 + if is_odd_volume: + array_flat = np.append(array_flat, np.array([0])) + + arr = subbyte.float32x2_to_float4e2m1x2(array_flat[0::2], array_flat[1::2]) + return arr.astype(np.uint8) + + +def _pack_4bitx2(array: np.ndarray) -> npt.NDArray[np.uint8]: + """Convert a numpy array to flatten, packed int4/uint4. 
Elements must be in the correct range.""" + # Create a 1D copy + array_flat = array.ravel().view(np.uint8).copy() + size = array.size + odd_sized = size % 2 == 1 + if odd_sized: + array_flat.resize([size + 1], refcheck=False) + array_flat &= 0x0F + array_flat[1::2] <<= 4 + return array_flat[0::2] | array_flat[1::2] # type: ignore[return-type] + + +def make_tensor( + name: str, + data_type: int, + dims: Sequence[int], + vals: Sequence[int | float] | bytes | np.ndarray, + raw: bool = False, +) -> TensorProto: + """Make a TensorProto with specified arguments. If raw is False, this + function will choose the corresponding proto field to store the + values based on data_type. If raw is True, use "raw_data" proto + field to store the values, and values should be of type bytes in + this case. + + Args: + name: tensor name + data_type: a value such as onnx.TensorProto.FLOAT + dims: shape + vals: values + raw: if True, vals contains the serialized content of the tensor, + otherwise, vals should be a list of values of the type defined by ``data_type``. + + Returns: + TensorProto + """ + tensor = TensorProto() + tensor.data_type = data_type + tensor.name = name + tensor.dims.extend(dims) + + if data_type == TensorProto.STRING and raw: + raise TypeError("Can not use raw_data to store string type.") + + np_dtype = tensor_dtype_to_np_dtype(data_type) + + if raw: + # NumPy doesn't have INT4/FP4. It is packed in couples to UINT8 buffers. + if data_type in {TensorProto.UINT4, TensorProto.INT4, TensorProto.FLOAT4E2M1}: + expected_size_bytes = 0.5 + else: + expected_size_bytes = np_dtype.itemsize + expected_size_bytes *= math.prod(dims) + expected_size_bytes = math.ceil(expected_size_bytes) + if isinstance(vals, np.ndarray): + raw_data = vals.tobytes() + elif isinstance(vals, bytes): + raw_data = vals + else: + raise TypeError( + f"Raw data must be bytes or numpy.ndarray, but got {type(vals)}." 
+ ) + if len(raw_data) != expected_size_bytes: + raise ValueError( + f"Raw data size does not match tensor's size. Expected {expected_size_bytes} bytes, but got {len(raw_data)} bytes." + ) + tensor.raw_data = raw_data + return tensor + + assert not raw, "Bug: raw should be False at this point." + + if data_type == TensorProto.STRING: + vals = np.array(vals).flatten() + if len(vals) != 0: + vals = np.vectorize(_to_bytes)(vals) # Convert to bytes + elif data_type in { + TensorProto.FLOAT8E4M3FN, + TensorProto.FLOAT8E4M3FNUZ, + TensorProto.FLOAT8E5M2, + TensorProto.FLOAT8E5M2FNUZ, + }: + # Float8 values are by default casted using saturating cast. + vals = onnx.numpy_helper.saturate_cast(np.asarray(vals), np_dtype).flatten() + elif data_type == TensorProto.FLOAT8E8M0: + vals = onnx.numpy_helper.to_float8e8m0( + np.asarray(vals), saturate=True, round_mode="up" + ).flatten() + else: + vals = np.asarray(vals, dtype=np_dtype).flatten() + + if data_type == TensorProto.COMPLEX128: + vals = vals.view(np.float64) # type: ignore[union-attr] + elif data_type == TensorProto.COMPLEX64: + vals = vals.view(np.float32) # type: ignore[union-attr] + elif data_type in {TensorProto.BFLOAT16, TensorProto.FLOAT16}: + vals = vals.view(np.uint16) # type: ignore[union-attr] + elif data_type in { + TensorProto.FLOAT8E4M3FN, + TensorProto.FLOAT8E4M3FNUZ, + TensorProto.FLOAT8E5M2, + TensorProto.FLOAT8E5M2FNUZ, + TensorProto.FLOAT8E8M0, + }: + vals = vals.view(np.uint8) # type: ignore[union-attr] + elif data_type in {TensorProto.UINT4, TensorProto.INT4, TensorProto.FLOAT4E2M1}: + # Convert to packed 4-bit representation + vals = _pack_4bitx2(vals) # type: ignore[union-attr,arg-type] + elif data_type == TensorProto.BOOL: + vals = vals.astype(np.uint8) # type: ignore[union-attr] + + field = tensor_dtype_to_field(data_type) + getattr(tensor, field).extend(vals) + return tensor + + +def make_sparse_tensor( + values: TensorProto, indices: TensorProto, dims: Sequence[int] +) -> onnx.SparseTensorProto: 
+ """Construct a SparseTensorProto + + Args: + values (TensorProto): the values + indices (TensorProto): the indices + dims: the shape + + Returns: + SparseTensorProto + """ + sparse = onnx.SparseTensorProto() + sparse.values.CopyFrom(values) + sparse.indices.CopyFrom(indices) + sparse.dims.extend(dims) + return sparse + + +def make_sequence( + name: str, + elem_type: SequenceProto.DataType, + values: Sequence[Any], +) -> SequenceProto: + """Make a Sequence with specified value arguments.""" + sequence = SequenceProto() + sequence.name = name + sequence.elem_type = elem_type + + if elem_type == SequenceProto.UNDEFINED: + return sequence + + attribute: RepeatedCompositeFieldContainer | None = None + if elem_type == SequenceProto.TENSOR: + attribute = sequence.tensor_values + elif elem_type == SequenceProto.SPARSE_TENSOR: + attribute = sequence.sparse_tensor_values + elif elem_type == SequenceProto.SEQUENCE: + attribute = sequence.sequence_values + elif elem_type == SequenceProto.MAP: + attribute = sequence.map_values + elif elem_type == OptionalProto.OPTIONAL: + attribute = sequence.optional_values + else: + raise TypeError("The element type in the input sequence is not supported.") + + attribute.extend(values) + return sequence + + +def make_map( + name: str, key_type: int, keys: list[Any], values: SequenceProto +) -> MapProto: + """Make a Map with specified key-value pair arguments. 
+ + Criteria for conversion: + - Keys and Values must have the same number of elements + - Every key in keys must be of the same type + - Every value in values must be of the same type + """ + map_proto = MapProto() + valid_key_int_types = [ + TensorProto.INT8, + TensorProto.INT16, + TensorProto.INT32, + TensorProto.INT64, + TensorProto.UINT8, + TensorProto.UINT16, + TensorProto.UINT32, + TensorProto.UINT64, + ] + map_proto.name = name + map_proto.key_type = key_type + if key_type == TensorProto.STRING: + map_proto.string_keys.extend(keys) + elif key_type in valid_key_int_types: + map_proto.keys.extend(keys) + map_proto.values.CopyFrom(values) + return map_proto + + +def make_optional( + name: str, + elem_type: OptionalProto.DataType, + value: google.protobuf.message.Message | None, +) -> OptionalProto: + """Make an Optional with specified value arguments.""" + optional = OptionalProto() + optional.name = name + optional.elem_type = elem_type + + if elem_type == OptionalProto.UNDEFINED: + return optional + attribute: google.protobuf.message.Message | None = None + if elem_type == OptionalProto.TENSOR: + attribute = optional.tensor_value + elif elem_type == OptionalProto.SPARSE_TENSOR: + attribute = optional.sparse_tensor_value + elif elem_type == OptionalProto.SEQUENCE: + attribute = optional.sequence_value + elif elem_type == OptionalProto.MAP: + attribute = optional.map_value + elif elem_type == OptionalProto.OPTIONAL: + attribute = optional.optional_value + else: + raise TypeError("The element type in the input optional is not supported.") + + assert value is not None + attribute.CopyFrom(value) # type: ignore[arg-type] + return optional + + +def _to_bytes(value: str | bytes) -> bytes: + """Coerce a string (or bytes) value into UTF-8 bytes.""" + if isinstance(value, str): + return value.encode("utf-8") + return value + + +def make_attribute( + key: str, + value: Any, + doc_string: str | None = None, + attr_type: int | None = None, +) -> AttributeProto: + 
"""Makes an AttributeProto based on the value type.""" + attr = AttributeProto() + attr.name = key + if doc_string: + attr.doc_string = doc_string + + # Singular cases + if isinstance(value, numbers.Integral): + attr.i = int(value) + attr.type = AttributeProto.INT + elif isinstance(value, numbers.Real): + attr.f = float(value) + attr.type = AttributeProto.FLOAT + elif isinstance(value, (str, bytes)): + # Encode strings into utf-8 + attr.s = _to_bytes(value) + attr.type = AttributeProto.STRING + elif isinstance(value, TensorProto): + attr.t.CopyFrom(value) + attr.type = AttributeProto.TENSOR + elif isinstance(value, onnx.SparseTensorProto): + attr.sparse_tensor.CopyFrom(value) + attr.type = AttributeProto.SPARSE_TENSOR + elif isinstance(value, GraphProto): + attr.g.CopyFrom(value) + attr.type = AttributeProto.GRAPH + elif isinstance(value, TypeProto): + attr.tp.CopyFrom(value) + attr.type = AttributeProto.TYPE_PROTO + # Iterable cases + elif isinstance(value, collections.abc.Iterable): + value = list(value) + if len(value) == 0 and attr_type is None: + raise ValueError( + f"Could not infer attribute `{key}` type from empty iterator" + ) + if attr_type is None: + types = {type(v) for v in value} + for exp_t, exp_enum in ( + (numbers.Integral, AttributeProto.INTS), + (numbers.Real, AttributeProto.FLOATS), + ((str, bytes), AttributeProto.STRINGS), + (TensorProto, AttributeProto.TENSORS), + (onnx.SparseTensorProto, AttributeProto.SPARSE_TENSORS), + (GraphProto, AttributeProto.GRAPHS), + (TypeProto, AttributeProto.TYPE_PROTOS), + ): + if all(issubclass(t, exp_t) for t in types): # type: ignore[arg-type] + attr_type = exp_enum + break + if attr_type is None: + raise ValueError( + "Could not infer the attribute type from the elements of the passed Iterable value." 
+ ) + + if attr_type == AttributeProto.INTS: + attr.ints.extend(value) + attr.type = AttributeProto.INTS + elif attr_type == AttributeProto.FLOATS: + attr.floats.extend(value) + attr.type = AttributeProto.FLOATS + elif attr_type == AttributeProto.STRINGS: + attr.strings.extend(_to_bytes(v) for v in value) + attr.type = AttributeProto.STRINGS + elif attr_type == AttributeProto.TENSORS: + attr.tensors.extend(value) + attr.type = AttributeProto.TENSORS + elif attr_type == AttributeProto.SPARSE_TENSORS: + attr.sparse_tensors.extend(value) + attr.type = AttributeProto.SPARSE_TENSORS + elif attr_type == AttributeProto.GRAPHS: + attr.graphs.extend(value) + attr.type = AttributeProto.GRAPHS + elif attr_type == AttributeProto.TYPE_PROTOS: + attr.type_protos.extend(value) + attr.type = AttributeProto.TYPE_PROTOS + else: + raise AssertionError() # Should not reach since `ValueError` must be raised in attr_type checking + else: + raise TypeError(f"'{value}' is not an accepted attribute value.") + + if attr_type is not None and attr.type != attr_type: + raise TypeError( + f"Inferred attribute type '{_attr_type_to_str(attr.type)}'({attr.type}) mismatched with specified type '{_attr_type_to_str(attr_type)}'({attr_type})" + ) + return attr + + +def make_attribute_ref( + name: str, attr_type: AttributeProto.AttributeType, doc_string: str | None = None +) -> AttributeProto: + """Make an AttributeProto holding a reference to the parent function's attribute of given name and type.""" + attr = AttributeProto() + attr.name = name + attr.type = attr_type + if doc_string: + attr.doc_string = doc_string + return attr + + +def get_attribute_value(attr: AttributeProto) -> Any: # noqa: PLR0911 + if attr.ref_attr_name: + raise ValueError(f"Cannot get value of reference attribute: {attr}") + if attr.type == AttributeProto.FLOAT: + return attr.f + if attr.type == AttributeProto.INT: + return attr.i + if attr.type == AttributeProto.STRING: + return attr.s + if attr.type == AttributeProto.TENSOR: 
+ return attr.t + if attr.type == AttributeProto.SPARSE_TENSOR: + return attr.sparse_tensor + if attr.type == AttributeProto.GRAPH: + return attr.g + if attr.type == AttributeProto.TYPE_PROTO: + return attr.tp + if attr.type == AttributeProto.FLOATS: + return list(attr.floats) + if attr.type == AttributeProto.INTS: + return list(attr.ints) + if attr.type == AttributeProto.STRINGS: + return list(attr.strings) + if attr.type == AttributeProto.TENSORS: + return list(attr.tensors) + if attr.type == AttributeProto.SPARSE_TENSORS: + return list(attr.sparse_tensors) + if attr.type == AttributeProto.GRAPHS: + return list(attr.graphs) + if attr.type == AttributeProto.TYPE_PROTOS: + return list(attr.type_protos) + if attr.type == AttributeProto.UNDEFINED: + return None + raise ValueError(f"Unsupported ONNX attribute: {attr}") + + +def get_node_attr_value(node: NodeProto, attr_name: str) -> Any: + matching = [x for x in node.attribute if x.name == attr_name] + if len(matching) > 1: + raise ValueError(f"Node has multiple attributes with name {attr_name}") + if len(matching) < 1: + raise ValueError(f"Node has no attribute with name {attr_name}") + return get_attribute_value(matching[0]) + + +def make_empty_tensor_value_info(name: str) -> ValueInfoProto: + value_info_proto = ValueInfoProto() + value_info_proto.name = name + return value_info_proto + + +def make_tensor_type_proto( + elem_type: int, + shape: Sequence[str | int | None] | None, + shape_denotation: list[str] | None = None, +) -> TypeProto: + """Makes a Tensor TypeProto based on the data type and shape.""" + type_proto = TypeProto() + tensor_type_proto = type_proto.tensor_type + tensor_type_proto.elem_type = elem_type + tensor_shape_proto = tensor_type_proto.shape + + if shape is not None: + # You might think this is a no-op (extending a normal Python + # list by [] certainly is), but protobuf lists work a little + # differently; if a field is never set, it is omitted from the + # resulting protobuf; a list that is 
explicitly set to be + # empty will get an (empty) entry in the protobuf. This + # difference is visible to our consumers, so make sure we emit + # an empty shape! + tensor_shape_proto.dim.extend([]) + + if shape_denotation and len(shape_denotation) != len(shape): + raise ValueError( + "Invalid shape_denotation. Must be of the same length as shape." + ) + + for i, d in enumerate(shape): + dim = tensor_shape_proto.dim.add() + if d is None: + pass + elif isinstance(d, int): + dim.dim_value = d + elif isinstance(d, str): + dim.dim_param = d + else: + raise ValueError( + f"Invalid item in shape: {d}. Needs to be of int or str." + ) + + if shape_denotation: + dim.denotation = shape_denotation[i] + + return type_proto + + +def make_tensor_value_info( + name: str, + elem_type: int, + shape: Sequence[str | int | None] | None, + doc_string: str = "", + shape_denotation: list[str] | None = None, +) -> ValueInfoProto: + """Makes a ValueInfoProto based on the data type and shape.""" + value_info_proto = ValueInfoProto() + value_info_proto.name = name + if doc_string: + value_info_proto.doc_string = doc_string + + tensor_type_proto = make_tensor_type_proto(elem_type, shape, shape_denotation) + value_info_proto.type.CopyFrom(tensor_type_proto) + return value_info_proto + + +def make_sparse_tensor_type_proto( + elem_type: int, + shape: Sequence[str | int | None] | None, + shape_denotation: list[str] | None = None, +) -> TypeProto: + """Makes a SparseTensor TypeProto based on the data type and shape.""" + type_proto = TypeProto() + sparse_tensor_type_proto = type_proto.sparse_tensor_type + sparse_tensor_type_proto.elem_type = elem_type + sparse_tensor_shape_proto = sparse_tensor_type_proto.shape + + if shape is not None: + # You might think this is a no-op (extending a normal Python + # list by [] certainly is), but protobuf lists work a little + # differently; if a field is never set, it is omitted from the + # resulting protobuf; a list that is explicitly set to be + # empty 
will get an (empty) entry in the protobuf. This + # difference is visible to our consumers, so make sure we emit + # an empty shape! + sparse_tensor_shape_proto.dim.extend([]) + + if shape_denotation and len(shape_denotation) != len(shape): + raise ValueError( + "Invalid shape_denotation. Must be of the same length as shape." + ) + + for i, d in enumerate(shape): + dim = sparse_tensor_shape_proto.dim.add() + if d is None: + pass + elif isinstance(d, int): + dim.dim_value = d + elif isinstance(d, str): + dim.dim_param = d + else: + raise ValueError( + f"Invalid item in shape: {d}. Needs to be of int or text." + ) + + if shape_denotation: + dim.denotation = shape_denotation[i] + + return type_proto + + +def make_sparse_tensor_value_info( + name: str, + elem_type: int, + shape: Sequence[str | int | None] | None, + doc_string: str = "", + shape_denotation: list[str] | None = None, +) -> ValueInfoProto: + """Makes a SparseTensor ValueInfoProto based on the data type and shape.""" + value_info_proto = ValueInfoProto() + value_info_proto.name = name + if doc_string: + value_info_proto.doc_string = doc_string + + sparse_tensor_type_proto = make_sparse_tensor_type_proto( + elem_type, shape, shape_denotation + ) + value_info_proto.type.sparse_tensor_type.CopyFrom( + sparse_tensor_type_proto.sparse_tensor_type + ) + return value_info_proto + + +def make_sequence_type_proto( + inner_type_proto: TypeProto, +) -> TypeProto: + """Makes a sequence TypeProto.""" + type_proto = TypeProto() + type_proto.sequence_type.elem_type.CopyFrom(inner_type_proto) + return type_proto + + +def make_optional_type_proto( + inner_type_proto: TypeProto, +) -> TypeProto: + """Makes an optional TypeProto.""" + type_proto = TypeProto() + type_proto.optional_type.elem_type.CopyFrom(inner_type_proto) + return type_proto + + +def make_map_type_proto( + key_type: int, + value_type: TypeProto, +) -> TypeProto: + """Makes a map TypeProto.""" + type_proto = TypeProto() + type_proto.map_type.key_type = 
key_type + type_proto.map_type.value_type.CopyFrom(value_type) + return type_proto + + +def make_value_info( + name: str, + type_proto: TypeProto, + doc_string: str = "", +) -> ValueInfoProto: + """Makes a ValueInfoProto with the given type_proto.""" + value_info_proto = ValueInfoProto() + value_info_proto.name = name + if doc_string: + value_info_proto.doc_string = doc_string + + value_info_proto.type.CopyFrom(type_proto) + return value_info_proto + + +def _sanitize_str(s: str | bytes) -> str: + if isinstance(s, str): + sanitized = s + elif isinstance(s, bytes): + sanitized = s.decode("utf-8", errors="ignore") + else: + sanitized = str(s) + if len(sanitized) < 64: # noqa: PLR2004 + return sanitized + return sanitized[:64] + f"...<+len={(len(sanitized) - 64)}>" + + +def make_tensor_sequence_value_info( + name: str, + elem_type: int, + shape: Sequence[str | int | None] | None, + doc_string: str = "", + elem_shape_denotation: list[str] | None = None, +) -> ValueInfoProto: + """Makes a Sequence[Tensors] ValueInfoProto based on the data type and shape.""" + value_info_proto = ValueInfoProto() + value_info_proto.name = name + if doc_string: + value_info_proto.doc_string = doc_string + + tensor_type_proto = make_tensor_type_proto(elem_type, shape, elem_shape_denotation) + sequence_type_proto = make_sequence_type_proto(tensor_type_proto) + value_info_proto.type.sequence_type.CopyFrom(sequence_type_proto.sequence_type) + + return value_info_proto + + +def printable_attribute( + attr: AttributeProto, subgraphs: bool = False +) -> str | tuple[str, list[GraphProto]]: + content = [] + content.append(attr.name) + content.append("=") + + def str_float(f: float) -> str: + # NB: Different Python versions print different numbers of trailing + # decimals, specifying this explicitly keeps it consistent for all + # versions + return f"{f:.15g}" + + def str_int(i: int) -> str: + return str(i) + + _T = TypeVar("_T") + + def str_list(str_elem: Callable[[_T], str], xs: Sequence[_T]) -> 
str: + return "[" + ", ".join(map(str_elem, xs)) + "]" + + # for now, this logic should continue to work as long as we are running on a proto3 + # implementation. If/when we switch to proto3, we will need to use attr.type + + # To support printing subgraphs, if we find a graph attribute, print out + # its name here and pass the graph itself up to the caller for later + # printing. + graphs = [] + if attr.HasField("f"): + content.append(str_float(attr.f)) + elif attr.HasField("i"): + content.append(str_int(attr.i)) + elif attr.HasField("s"): + # TODO: Bit nervous about Python 2 / Python 3 determinism implications + content.append(repr(_sanitize_str(attr.s))) + elif attr.HasField("t"): + if len(attr.t.dims) > 0: + content.append("") + else: + # special case to print scalars + field = tensor_dtype_to_field(attr.t.data_type) + content.append(f"") + elif attr.HasField("g"): + content.append(f"") + graphs.append(attr.g) + elif attr.HasField("tp"): + content.append(f"") + elif attr.floats: + content.append(str_list(str_float, attr.floats)) + elif attr.ints: + content.append(str_list(str_int, attr.ints)) + elif attr.strings: + # TODO: Bit nervous about Python 2 / Python 3 determinism implications + content.append(str(list(map(_sanitize_str, attr.strings)))) + elif attr.tensors: + content.append("[, ...]") + elif attr.type_protos: + content.append("[") + for i, tp in enumerate(attr.type_protos): + comma = "," if i != len(attr.type_protos) - 1 else "" + content.append(f"{comma}") + content.append("]") + elif attr.graphs: + content.append("[") + for i, g in enumerate(attr.graphs): + comma = "," if i != len(attr.graphs) - 1 else "" + content.append(f"{comma}") + content.append("]") + graphs.extend(attr.graphs) + else: + content.append("") + if subgraphs: + return " ".join(content), graphs + return " ".join(content) + + +def printable_dim(dim: TensorShapeProto.Dimension) -> str: + which = dim.WhichOneof("value") + if which is None: + return "?" 
+ return str(getattr(dim, which)) + + +def printable_type(t: TypeProto) -> str: + if t.WhichOneof("value") == "tensor_type": + s: str = TensorProto.DataType.Name(t.tensor_type.elem_type) # type: ignore[attr-defined] + if t.tensor_type.HasField("shape"): + if len(t.tensor_type.shape.dim): + s += str(", " + "x".join(map(printable_dim, t.tensor_type.shape.dim))) + else: + s += ", scalar" + return s + if t.WhichOneof("value") is None: + return "" + return f"Unknown type {t.WhichOneof('value')}" + + +def printable_value_info(v: ValueInfoProto) -> str: + s = f"%{v.name}" + if v.type: + s = f"{s}[{printable_type(v.type)}]" + return s + + +def printable_tensor_proto(t: TensorProto) -> str: + s = f"%{t.name}[" + s += TensorProto.DataType.Name(t.data_type) # type: ignore[attr-defined] + if t.dims is not None: + if len(t.dims): + s += str(", " + "x".join(map(str, t.dims))) + else: + s += ", scalar" + s += "]" + return s + + +def printable_node( + node: NodeProto, prefix: str = "", subgraphs: bool = False +) -> str | tuple[str, list[GraphProto]]: + content = [] + if len(node.output): + content.append(", ".join([f"%{name}" for name in node.output])) + content.append("=") + # To deal with nested graphs + graphs: list[GraphProto] = [] + printed_attrs = [] + for attr in node.attribute: + if subgraphs: + printed_attr_subgraphs = printable_attribute(attr, subgraphs) + if not isinstance(printed_attr_subgraphs[1], list): + raise TypeError( + f"printed_attr_subgraphs[1] must be an instance of {list}." 
+ ) + graphs.extend(printed_attr_subgraphs[1]) + printed_attrs.append(printed_attr_subgraphs[0]) + else: + printed = printable_attribute(attr) + if not isinstance(printed, str): + raise TypeError(f"printed must be an instance of {str}.") + printed_attrs.append(printed) + printed_attributes = ", ".join(sorted(printed_attrs)) + printed_inputs = ", ".join([f"%{name}" for name in node.input]) + if node.attribute: + content.append(f"{node.op_type}[{printed_attributes}]({printed_inputs})") + else: + content.append(f"{node.op_type}({printed_inputs})") + if subgraphs: + return prefix + " ".join(content), graphs + return prefix + " ".join(content) + + +@typing_extensions.deprecated( + "Deprecated since 1.19. Consider using onnx.printer.to_text() instead." +) +def printable_graph(graph: GraphProto, prefix: str = "") -> str: + """Display a GraphProto as a string. + + .. deprecated:: 1.19 + Consider using :func:`onnx.printer.to_text` instead. + + Args: + graph (GraphProto): the graph to display + prefix (string): prefix of every line + + Returns: + string + """ + content = [] + indent = prefix + " " + # header + header = ["graph", graph.name] + initializers = {t.name for t in graph.initializer} + if len(graph.input): + header.append("(") + in_strs = [] # required inputs + in_with_init_strs: list = [] # optional inputs with initializer providing default value + for inp in graph.input: + if inp.name not in initializers: + in_strs.append(printable_value_info(inp)) + else: + in_with_init_strs.append(printable_value_info(inp)) + if in_strs: + content.append(prefix + " ".join(header)) + header = [] + for line in in_strs: + content.append(prefix + " " + line) # noqa: PERF401 + header.append(")") + + if in_with_init_strs: + header.append("optional inputs with matching initializers (") + content.append(prefix + " ".join(header)) + header = [] + for line in in_with_init_strs: + content.append(prefix + " " + line) # noqa: PERF401 + header.append(")") + + # from IR 4 onwards an 
initializer is not required to have a matching graph input + # so output the name, type and shape of those as well + if len(in_with_init_strs) < len(initializers): + graph_inputs = {i.name for i in graph.input} + init_strs = [ + printable_tensor_proto(i) + for i in graph.initializer + if i.name not in graph_inputs + ] + header.append("initializers (") + content.append(prefix + " ".join(header)) + header = [] + for line in init_strs: + content.append(prefix + " " + line) # noqa: PERF401 + header.append(")") + + header.append("{") + content.append(prefix + " ".join(header)) + graphs: list[GraphProto] = [] + # body + for node in graph.node: + contents_subgraphs = printable_node(node, indent, subgraphs=True) + if not isinstance(contents_subgraphs[1], list): + raise TypeError(f"contents_subgraphs[1] must be an instance of {list}.") + content.append(contents_subgraphs[0]) + graphs.extend(contents_subgraphs[1]) + # tail + tail = ["return"] + if len(graph.output): + tail.append(", ".join([f"%{out.name}" for out in graph.output])) + content.append(indent + " ".join(tail)) + # closing bracket + content.append(prefix + "}") + for g in graphs: + content.append("\n" + printable_graph(g)) # noqa: PERF401 + return "\n".join(content) + + +def strip_doc_string(proto: google.protobuf.message.Message) -> None: + """Empties `doc_string` field on any nested protobuf messages""" + if not isinstance(proto, google.protobuf.message.Message): + raise TypeError( + f"proto must be an instance of {google.protobuf.message.Message}." 
+ ) + for descriptor in proto.DESCRIPTOR.fields: + if descriptor.name == "doc_string": + proto.ClearField(descriptor.name) + elif descriptor.type == descriptor.TYPE_MESSAGE: + if descriptor.label == descriptor.LABEL_REPEATED: + for x in getattr(proto, descriptor.name): + strip_doc_string(x) + elif proto.HasField(descriptor.name): + strip_doc_string(getattr(proto, descriptor.name)) + + +def make_training_info( + algorithm: GraphProto, + algorithm_bindings: AssignmentBindingType, + initialization: GraphProto | None, + initialization_bindings: AssignmentBindingType | None, +) -> TrainingInfoProto: + training_info = TrainingInfoProto() + training_info.algorithm.CopyFrom(algorithm) + for k, v in algorithm_bindings: + binding = training_info.update_binding.add() + binding.key = k + binding.value = v + + if initialization: + training_info.initialization.CopyFrom(initialization) + if initialization_bindings: + for k, v in initialization_bindings: + binding = training_info.initialization_binding.add() + binding.key = k + binding.value = v + + return training_info + + +# Following functions are used for mapping +def tensor_dtype_to_np_dtype(tensor_dtype: int) -> np.dtype: + """Convert a TensorProto's data_type to corresponding numpy dtype. It can be used while making tensor. + + Args: + tensor_dtype: TensorProto's data_type + + Returns: + numpy's data_type + """ + return _mapping.TENSOR_TYPE_MAP[tensor_dtype].np_dtype + + +def tensor_dtype_to_storage_tensor_dtype(tensor_dtype: int) -> int: + """Convert a TensorProto's data_type to corresponding data_type for storage. + + Args: + tensor_dtype: TensorProto's data_type + + Returns: + data_type for storage + """ + return _mapping.TENSOR_TYPE_MAP[tensor_dtype].storage_dtype + + +def tensor_dtype_to_string(tensor_dtype: int) -> str: + """Get the name of given TensorProto's data_type. 
+ + Args: + tensor_dtype: TensorProto's data_type + + Returns: + the name of data_type + """ + return _mapping.TENSOR_TYPE_MAP[tensor_dtype].name + + +@functools.lru_cache(None) +def tensor_dtype_to_field(tensor_dtype: int) -> str: + """Convert a TensorProto's data_type to corresponding field name for storage. It can be used while making tensors. + + Args: + tensor_dtype: TensorProto's data_type + + Returns: + field name + """ + storage_tensor_type_to_field = { + int(TensorProto.FLOAT): "float_data", + int(TensorProto.INT32): "int32_data", + int(TensorProto.INT64): "int64_data", + int(TensorProto.DOUBLE): "double_data", + int(TensorProto.UINT32): "uint64_data", + int(TensorProto.UINT64): "uint64_data", + int(TensorProto.STRING): "string_data", + } + return storage_tensor_type_to_field[ + _mapping.TENSOR_TYPE_MAP[tensor_dtype].storage_dtype + ] + + +@functools.lru_cache(None) +def np_dtype_to_tensor_dtype(np_dtype: np.dtype) -> TensorProto.DataType: + """Convert a numpy's dtype to corresponding tensor type. It can be used while converting numpy arrays to tensors. + + Args: + np_dtype: numpy's data_type + + Returns: + TensorsProto's data_type + """ + _np_dtype_to_tensor_dtype = { + v.np_dtype: k for k, v in _mapping.TENSOR_TYPE_MAP.items() + } + if np_dtype in _np_dtype_to_tensor_dtype: + return typing.cast("TensorProto.DataType", _np_dtype_to_tensor_dtype[np_dtype]) + if np.issubdtype(np_dtype, np.str_): + return TensorProto.STRING # type: ignore[no-any-return] + + raise ValueError( + f"Unable to convert type {np_dtype!r} into TensorProto element type." + ) + + +def get_all_tensor_dtypes() -> KeysView[int]: + """Get all tensor types from TensorProto. 
+ + Returns: + all tensor types from TensorProto + """ + return _mapping.TENSOR_TYPE_MAP.keys() + + +_ATTRIBUTE_TYPE_TO_STR: dict[int, str] = { + k: v + for v, k in AttributeProto.AttributeType.items() # type: ignore[attr-defined] +} + + +def _attr_type_to_str(attr_type: int) -> str: + """Convert AttributeProto type to string. + + Args: + attr_type: AttributeProto type. + + Returns: + String representing the supplied attr_type. + """ + if attr_type in AttributeProto.AttributeType.values(): # type: ignore[attr-defined] + return _ATTRIBUTE_TYPE_TO_STR[attr_type] + return AttributeProto.AttributeType.keys()[0] # type: ignore diff --git a/pythonProject/.venv/Lib/site-packages/onnx/hub.py b/pythonProject/.venv/Lib/site-packages/onnx/hub.py new file mode 100644 index 0000000000000000000000000000000000000000..496f107f809a44bc8c21e5ebb2bbe105993de663 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/hub.py @@ -0,0 +1,483 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 +"""ONNX Model Hub + +This implements the python client for the ONNX model hub. +""" + +from __future__ import annotations + +import hashlib +import json +import os +import sys +from io import BytesIO +from os.path import join +from typing import IO, Any, cast +from urllib.error import HTTPError +from urllib.request import urlopen + +import onnx + +if "ONNX_HOME" in os.environ: + _ONNX_HUB_DIR = join(os.environ["ONNX_HOME"], "hub") +elif "XDG_CACHE_HOME" in os.environ: + _ONNX_HUB_DIR = join(os.environ["XDG_CACHE_HOME"], "onnx", "hub") +else: + _ONNX_HUB_DIR = join(os.path.expanduser("~"), ".cache", "onnx", "hub") + + +class ModelInfo: + """A class to represent a model's property and metadata in the ONNX Hub. + It extracts model name, path, sha, tags, etc. from the passed in raw_model_info dict. + + Attributes: + model: The name of the model. + model_path: The path to the model, relative to the model zoo (https://github.com/onnx/models/) repo root. 
+ metadata: Additional metadata of the model, such as the size of the model, IO ports, etc. + model_sha: The SHA256 digest of the model file. + tags: A set of tags associated with the model. + opset: The opset version of the model. + """ + + def __init__(self, raw_model_info: dict[str, Any]) -> None: + """Initializer. + + Args: + raw_model_info: A JSON dict containing the model info. + """ + self.model = cast("str", raw_model_info["model"]) + + self.model_path = cast("str", raw_model_info["model_path"]) + self.metadata: dict[str, Any] = cast( + "dict[str, Any]", raw_model_info["metadata"] + ) + self.model_sha: str | None = None + if "model_sha" in self.metadata: + self.model_sha = cast("str", self.metadata["model_sha"]) + + self.tags: set[str] = set() + if "tags" in self.metadata: + self.tags = set(cast("list[str]", self.metadata["tags"])) + + self.opset = cast("int", raw_model_info["opset_version"]) + self.raw_model_info: dict[str, Any] = raw_model_info + + def __str__(self) -> str: + return f"ModelInfo(model={self.model}, opset={self.opset}, path={self.model_path}, metadata={self.metadata})" + + def __repr__(self) -> str: + return self.__str__() + + +def set_dir(new_dir: str) -> None: + """Sets the current ONNX hub cache location. + + Args: + new_dir: Location of new model hub cache. + """ + global _ONNX_HUB_DIR # noqa: PLW0603 + _ONNX_HUB_DIR = new_dir + + +def get_dir() -> str: + """Gets the current ONNX hub cache location. + + Returns: + The location of the ONNX hub model cache. + """ + return _ONNX_HUB_DIR + + +def _parse_repo_info(repo: str) -> tuple[str, str, str]: + """Gets the repo owner, name and ref from a repo specification string.""" + repo_owner = repo.split(":")[0].split("/")[0] + repo_name = repo.split(":")[0].split("/")[1] + if ":" in repo: + repo_ref = repo.split(":")[1] + else: + repo_ref = "main" + return repo_owner, repo_name, repo_ref + + +def _verify_repo_ref(repo: str) -> bool: + """Verifies whether the given model repo can be trusted. 
+ A model repo can be trusted if it matches onnx/models:main. + """ + repo_owner, repo_name, repo_ref = _parse_repo_info(repo) + return (repo_owner == "onnx") and (repo_name == "models") and (repo_ref == "main") + + +def _get_base_url(repo: str, lfs: bool = False) -> str: + """Gets the base github url from a repo specification string. + + Args: + repo: The location of the model repo in format + "user/repo[:branch]". If no branch is found will default to + "main". + lfs: Whether the url is for downloading lfs models. + + Returns: + The base github url for downloading. + """ + repo_owner, repo_name, repo_ref = _parse_repo_info(repo) + + if lfs: + return f"https://media.githubusercontent.com/media/{repo_owner}/{repo_name}/{repo_ref}/" + return f"https://raw.githubusercontent.com/{repo_owner}/{repo_name}/{repo_ref}/" + + +def _download_file(url: str, file_name: str) -> None: + """Downloads the file with specified file_name from the url. + + Args: + url: A url of download link. + file_name: A specified file name for the downloaded file. + """ + chunk_size = 16384 # 1024 * 16 + with urlopen(url) as response, open(file_name, "wb") as f: + # Loads processively with chuck_size for huge models + while True: + chunk = response.read(chunk_size) + if not chunk: + break + f.write(chunk) + + +def list_models( + repo: str = "onnx/models:main", + model: str | None = None, + tags: list[str] | None = None, +) -> list[ModelInfo]: + """Gets the list of model info consistent with a given name and tags + + Args: + repo: The location of the model repo in format + "user/repo[:branch]". If no branch is found will default to + "main" + model: The name of the model to search for. If `None`, will + return all models with matching tags. + tags: A list of tags to filter models by. If `None`, will return + all models with matching name. + + Returns: + ``ModelInfo``s. 
+ """ + base_url = _get_base_url(repo) + manifest_url = base_url + "ONNX_HUB_MANIFEST.json" + try: + with urlopen(manifest_url) as response: + manifest: list[ModelInfo] = [ + ModelInfo(info) for info in json.load(cast("IO[str]", response)) + ] + except HTTPError as e: + raise AssertionError(f"Could not find manifest at {manifest_url}") from e + + # Filter by model name first. + matching_models = ( + manifest + if model is None + else [m for m in manifest if m.model.lower() == model.lower()] + ) + + # Filter by tags + if tags is None: + return matching_models + + canonical_tags = {t.lower() for t in tags} + matching_info_list: list[ModelInfo] = [] + for m in matching_models: + model_tags = {t.lower() for t in m.tags} + if len(canonical_tags.intersection(model_tags)) > 0: + matching_info_list.append(m) + return matching_info_list + + +def get_model_info( + model: str, repo: str = "onnx/models:main", opset: int | None = None +) -> ModelInfo: + """Gets the model info matching the given name and opset. + + Args: + model: The name of the onnx model in the manifest. This field is + case-sensitive + repo: The location of the model repo in format + "user/repo[:branch]". If no branch is found will default to + "main" + opset: The opset of the model to get. The default of `None` will + return the model with largest opset. + + Returns: + ``ModelInfo``. + """ + matching_models = list_models(repo, model) + if not matching_models: + raise AssertionError(f"No models found with name {model}") + + if opset is None: + selected_models = sorted(matching_models, key=lambda m: -m.opset) + else: + selected_models = [m for m in matching_models if m.opset == opset] + if not selected_models: + valid_opsets = [m.opset for m in matching_models] + raise AssertionError( + f"{model} has no version with opset {opset}. 
Valid opsets: {valid_opsets}" + ) + return selected_models[0] + + +def load( + model: str, + repo: str = "onnx/models:main", + opset: int | None = None, + force_reload: bool = False, + silent: bool = False, +) -> onnx.ModelProto | None: + """Downloads a model by name from the onnx model hub. + + Args: + model: The name of the onnx model in the manifest. This field is + case-sensitive + repo: The location of the model repo in format + "user/repo[:branch]". If no branch is found will default to + "main" + opset: The opset of the model to download. The default of `None` + automatically chooses the largest opset + force_reload: Whether to force the model to re-download even if + its already found in the cache + silent: Whether to suppress the warning message if the repo is + not trusted. + + Returns: + ModelProto or None + """ + selected_model = get_model_info(model, repo, opset) + local_model_path_arr = selected_model.model_path.split("/") + if selected_model.model_sha is not None: + local_model_path_arr[-1] = ( + f"{selected_model.model_sha}_{local_model_path_arr[-1]}" + ) + local_model_path = join(_ONNX_HUB_DIR, os.sep.join(local_model_path_arr)) + + if force_reload or not os.path.exists(local_model_path): + if not _verify_repo_ref(repo) and not silent: + msg = f"The model repo specification {repo} is not trusted and may contain security vulnerabilities. Only continue if you trust this repo." 
+ + print(msg, file=sys.stderr) + print("Continue?[y/n]") + if input().lower() != "y": + return None + + os.makedirs(os.path.dirname(local_model_path), exist_ok=True) + lfs_url = _get_base_url(repo, True) + print(f"Downloading {model} to local path {local_model_path}") + _download_file(lfs_url + selected_model.model_path, local_model_path) + else: + print(f"Using cached {model} model from {local_model_path}") + + with open(local_model_path, "rb") as f: + model_bytes = f.read() + + if selected_model.model_sha is not None: + downloaded_sha = hashlib.sha256(model_bytes).hexdigest() + if not downloaded_sha == selected_model.model_sha: + raise AssertionError( + f"The cached model {selected_model.model} has SHA256 {downloaded_sha} " + f"while checksum should be {selected_model.model_sha}. " + "The model in the hub may have been updated. Use force_reload to " + "download the model from the model hub." + ) + + return onnx.load(cast("IO[bytes]", BytesIO(model_bytes))) + + +def download_model_with_test_data( + model: str, + repo: str = "onnx/models:main", + opset: int | None = None, + force_reload: bool = False, + silent: bool = False, +) -> str | None: + """Downloads a model along with test data by name from the onnx model hub and returns the directory to which the files have been extracted. + Users are responsible for making sure the model comes from a trusted source, and the data is safe to be extracted. + + Args: + model: The name of the onnx model in the manifest. This field is + case-sensitive + repo: The location of the model repo in format + "user/repo[:branch]". If no branch is found will default to + "main" + opset: The opset of the model to download. The default of `None` + automatically chooses the largest opset + force_reload: Whether to force the model to re-download even if + its already found in the cache + silent: Whether to suppress the warning message if the repo is + not trusted. 
+ + Returns: + str or None + """ + selected_model = get_model_info(model, repo, opset) + + local_model_with_data_path_arr = selected_model.metadata[ + "model_with_data_path" + ].split("/") + + model_with_data_sha = selected_model.metadata["model_with_data_sha"] + + if model_with_data_sha is not None: + local_model_with_data_path_arr[-1] = ( + f"{model_with_data_sha}_{local_model_with_data_path_arr[-1]}" + ) + local_model_with_data_path = join( + _ONNX_HUB_DIR, os.sep.join(local_model_with_data_path_arr) + ) + + if force_reload or not os.path.exists(local_model_with_data_path): + if not _verify_repo_ref(repo) and not silent: + msg = f"The model repo specification {repo} is not trusted and may contain security vulnerabilities. Only continue if you trust this repo." + + print(msg, file=sys.stderr) + print("Continue?[y/n]") + if input().lower() != "y": + return None + + os.makedirs(os.path.dirname(local_model_with_data_path), exist_ok=True) + lfs_url = _get_base_url(repo, True) + print(f"Downloading {model} to local path {local_model_with_data_path}") + _download_file( + lfs_url + selected_model.metadata["model_with_data_path"], + local_model_with_data_path, + ) + else: + print(f"Using cached {model} model from {local_model_with_data_path}") + + with open(local_model_with_data_path, "rb") as f: + model_with_data_bytes = f.read() + + if model_with_data_sha is not None: + downloaded_sha = hashlib.sha256(model_with_data_bytes).hexdigest() + if not downloaded_sha == model_with_data_sha: + raise AssertionError( + f"The cached model {selected_model.model} has SHA256 {downloaded_sha} " + f"while checksum should be {model_with_data_sha}. " + "The model in the hub may have been updated. Use force_reload to " + "download the model from the model hub." 
+ ) + + local_model_with_data_dir_path = local_model_with_data_path[ + 0 : len(local_model_with_data_path) - len(".tar.gz") + ] + onnx.utils._extract_model_safe( + local_model_with_data_path, local_model_with_data_dir_path + ) + model_with_data_path = ( + local_model_with_data_dir_path + + "/" + + os.listdir(local_model_with_data_dir_path)[0] + ) + + return model_with_data_path + + +def load_composite_model( + network_model: str, + preprocessing_model: str, + network_repo: str = "onnx/models:main", + preprocessing_repo: str = "onnx/models:main", + opset: int | None = None, + force_reload: bool = False, + silent: bool = False, +) -> onnx.ModelProto | None: + """Builds a composite model including data preprocessing by downloading a network and a preprocessing model + and combine it into a single model + + Args: + network_model: The name of the onnx model in the manifest. + preprocessing_model: The name of the preprocessing model. + network_repo: The location of the model repo in format + "user/repo[:branch]". If no branch is found will default to + "main" + preprocessing_repo: The location of the proprocessing model repo in format + "user/repo[:branch]". If no branch is found will default to + "main" + opset: The opset of the model to download. The default of `None` + automatically chooses the largest opset + force_reload: Whether to force the model to re-download even if + its already found in the cache + silent: Whether to suppress the warning message if the repo is + not trusted. 
+ + Returns: + ModelProto or None + """ + preprocessing = load( + preprocessing_model, preprocessing_repo, opset, force_reload, silent + ) + if preprocessing is None: + raise RuntimeError( + f"Could not load the preprocessing model: {preprocessing_model}" + ) + network = load(network_model, network_repo, opset, force_reload, silent) + if network is None: + raise RuntimeError(f"Could not load the network model: {network_model}") + + all_domains: set[str] = set() + domains_to_version_network: dict[str, int] = {} + domains_to_version_preprocessing: dict[str, int] = {} + + for opset_import_entry in network.opset_import: + domain = ( + "ai.onnx" if opset_import_entry.domain == "" else opset_import_entry.domain + ) + all_domains.add(domain) + domains_to_version_network[domain] = opset_import_entry.version + + for opset_import_entry in preprocessing.opset_import: + domain = ( + "ai.onnx" if opset_import_entry.domain == "" else opset_import_entry.domain + ) + all_domains.add(domain) + domains_to_version_preprocessing[domain] = opset_import_entry.version + + preprocessing_opset_version = -1 + network_opset_version = -1 + for domain in all_domains: + if domain == "ai.onnx": + preprocessing_opset_version = domains_to_version_preprocessing[domain] + network_opset_version = domains_to_version_network[domain] + elif ( + domain in domains_to_version_preprocessing + and domain in domains_to_version_network + and domains_to_version_preprocessing[domain] + != domains_to_version_preprocessing[domain] + ): + raise ValueError( + f"Can not merge {preprocessing_model} and {network_model} because they contain " + f"different opset versions for domain {domain} ({domains_to_version_preprocessing[domain]}) " + f"and {domains_to_version_network[domain]}). Only the default domain can be " + "automatically converted to the highest version of the two." 
+ ) + if preprocessing_opset_version > network_opset_version: + network = onnx.version_converter.convert_version( + network, preprocessing_opset_version + ) + network.ir_version = preprocessing.ir_version + onnx.checker.check_model(network) + elif network_opset_version > preprocessing_opset_version: + preprocessing = onnx.version_converter.convert_version( + preprocessing, network_opset_version + ) + preprocessing.ir_version = network.ir_version + onnx.checker.check_model(preprocessing) + + io_map = [ + (out_entry.name, in_entry.name) + for out_entry, in_entry in zip(preprocessing.graph.output, network.graph.input) + ] + + model_with_preprocessing = onnx.compose.merge_models( + preprocessing, network, io_map=io_map + ) + return model_with_preprocessing diff --git a/pythonProject/.venv/Lib/site-packages/onnx/inliner.py b/pythonProject/.venv/Lib/site-packages/onnx/inliner.py new file mode 100644 index 0000000000000000000000000000000000000000..bb12d76bcae1089c7b16821fc0329a8933cf45b0 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/inliner.py @@ -0,0 +1,60 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import annotations + +import onnx +import onnx.onnx_cpp2py_export.inliner as C # noqa: N812 + + +def inline_local_functions( + model: onnx.ModelProto, convert_version: bool = False +) -> onnx.ModelProto: + """Inline model-local functions in given model. + + Arguments: + model: an ONNX ModelProto + convert_version: if true, try to apply automatic version-conversion to functions requiring a + different (ONNX) opset version from the model. 
+ + Returns: + ModelProto with all calls to model-local functions inlined (recursively) + """ + result = C.inline_local_functions(model.SerializeToString(), convert_version) + inlined_model = onnx.ModelProto() + inlined_model.ParseFromString(result) + return inlined_model + + +def inline_selected_functions( + model: onnx.ModelProto, + function_ids: list[tuple[str, str]], + exclude: bool = False, + inline_schema_functions: bool = False, +) -> onnx.ModelProto: + """Inline selected functions in given model. + + Arguments: + model: an ONNX ModelProto + function_ids: list of functions to include/exclude when inlining. Each + element is a tuple of (function domain, function name). + exclude: if true, inlines all functions except those specified in function_ids. + if false, inlines all functions specified in function_ids. + inline_schema_functions: if true, inlines schema-defined functions as well + as model-local functions. Otherwise, only model-local functions are inlined. + + Returns: + ModelProto with all calls to model-local functions inlined (recursively) + """ + if inline_schema_functions: + result = C.inline_selected_functions2( + model.SerializeToString(), function_ids, exclude + ) + else: + result = C.inline_selected_functions( + model.SerializeToString(), function_ids, exclude + ) + inlined_model = onnx.ModelProto() + inlined_model.ParseFromString(result) + return inlined_model diff --git a/pythonProject/.venv/Lib/site-packages/onnx/model_container.py b/pythonProject/.venv/Lib/site-packages/onnx/model_container.py new file mode 100644 index 0000000000000000000000000000000000000000..8a95c9e482d91262fafddacc719f85bc55679628 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/model_container.py @@ -0,0 +1,350 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 +"""Implements function make_large_model to easily create and save models +bigger than 2 Gb. 
+""" + +from __future__ import annotations + +import os +import sys +from typing import TYPE_CHECKING, Any + +import numpy as np + +import onnx +import onnx.external_data_helper as ext_data +import onnx.helper +import onnx.onnx_cpp2py_export.checker as c_checker + +if TYPE_CHECKING: + from collections.abc import Iterable + + +def _set_external_data( + tensor: onnx.TensorProto, + location: str, + offset: int | None = None, + length: int | None = None, + checksum: str | None = None, + basepath: str | None = None, +) -> None: + del tensor.external_data[:] + tensor.data_location = onnx.TensorProto.EXTERNAL + for k, v in { + "location": location, + "offset": offset, + "length": length, + "checksum": checksum, + "basepath": basepath, + }.items(): + if v is not None: + entry = tensor.external_data.add() + entry.key = k + entry.value = str(v) + + +def _enumerate_subgraphs(graph): + for node in graph.node: + for att in node.attribute: + if att.g: + yield att.g + yield from _enumerate_subgraphs(att.g) + + +def make_large_tensor_proto( + location: str, tensor_name: str, tensor_type: int, shape: tuple[int, ...] +) -> onnx.TensorProto: + """Create an external tensor. + + Arguments: + location: unique identifier (not necessary a path) + tensor_name: tensor name in the graph + tensor_type: onnx type + shape: shape the of the initializer + + Returns: + the created tensor + """ + tensor_location = location + tensor = onnx.TensorProto() + tensor.name = tensor_name + _set_external_data(tensor, tensor_location) + tensor.data_type = tensor_type + tensor.dims.extend(shape) + return tensor + + +class ModelContainer: + """Implements an API to store large tensors outside the main ModelProto, + it avoids copying large initializers when defining the model and these initializers + are never serialized through protobuf. + No tensor is stored on disk until the user explicitly saves the model. 
+ """ + + def __init__(self) -> None: + self.model_proto_: onnx.ModelProto | None = None + self.large_initializers: dict[str, np.ndarray] = {} + + def check_model(self): + if self.model_proto is not None: + onnx.checker.check_model(self.model_proto) + + def __getitem__(self, name: str) -> np.ndarray: + """Returns an external tensor given its name.""" + if name not in self.large_initializers: + raise ValueError( + f"Unable to find large tensor {name!r} among {sorted(self.large_initializers)}." + ) + return self.large_initializers[name] + + @property + def model_proto(self) -> onnx.ModelProto: + if self.model_proto_ is None: + raise RuntimeError("ModelContainer is empty.") + return self.model_proto_ + + @model_proto.setter + def model_proto(self, model_proto: onnx.ModelProto): + self.model_proto_ = model_proto + self.graphs_ = list(self.enumerate_graph_protos()) + + def enumerate_graph_protos(self) -> Iterable[onnx.GraphProto]: + """Enumerates all GraphProtos in a model.""" + yield self.model_proto.graph + yield from _enumerate_subgraphs(self.model_proto.graph) + + def is_in_memory_external_initializer(self, name: str) -> bool: + """Tells if an initializer name is an external initializer stored in memory. + The name must start with '#' in that case. + """ + return name.startswith("#") + + def set_large_initializers(self, large_initializers: dict[str, np.ndarray]): + """Adds all large tensors (not stored in the model).""" + for k in large_initializers: + if not self.is_in_memory_external_initializer(k): + raise ValueError( + f"The location {k!r} must start with '#' to be ignored by check model." 
+ ) + self.large_initializers = large_initializers + + def check_large_initializers(self) -> None: + for tensor in ext_data._get_all_tensors(self.model_proto): + if not ext_data.uses_external_data(tensor): + continue + prop: onnx.StringStringEntryProto | None = None + for ext in tensor.external_data: + if ext.key == "location": + prop = ext + if prop is None: + raise RuntimeError( + f"No location found for tensor name {tensor.name!r}." + ) + if prop.value not in self.large_initializers: + raise RuntimeError( + f"Unable to find large tensor named {tensor.name!r} " + f"with location {prop.value!r} in " + f"{sorted(self.large_initializers)}." + ) + + def _save_external( + self, file_path: str, all_tensors_to_one_file: bool + ) -> onnx.ModelProto: + """Save the large model into a main onnx file and one file + per tensor. Follows the same format as :func:`write_external_data_tensors + `. + The main model needs to be modified to update the file location, + the function returns this modified copy. 
+ + Arguments: + file_path: model file + all_tensors_to_one_file: all tensors in one file + + Returns: + modified main model proto + """ + + def _clean_name(prefix: str, name: str, unique_names: dict[str, int]) -> str: + if prefix: + name = f"{prefix}-{name}" + for c in ":/\\;,!": + name = name.replace(c, "") + base_name = name + if name in unique_names: + i = unique_names[name] + 1 + unique_names[name] = i + return f"{base_name}_{i}" + unique_names[name] = 1 + return name + + unique_names: dict[str, int] = {} + folder = os.path.dirname(file_path) + if not os.path.exists(folder): + raise FileNotFoundError(f"Folder {folder!r} does not exist.") + proto = self.model_proto.SerializeToString() + copy = onnx.ModelProto() + copy.ParseFromString(proto) + prefix = os.path.splitext(os.path.split(file_path)[-1])[0] + + if all_tensors_to_one_file: + file_weight = f"{os.path.split(file_path)[1]}.weight" + full_file_weight = f"{file_path}.weight" + offset = 0 + with open(full_file_weight, "wb") as f: + pass + + for tensor in ext_data._get_all_tensors(copy): + if not ext_data.uses_external_data(tensor): + continue + prop: onnx.StringStringEntryProto | None = None + for ext in tensor.external_data: + if ext.key == "location": + prop = ext + if prop is None: + raise RuntimeError( + f"No location found for tensor name {tensor.name!r}." + ) + if prop.value not in self.large_initializers: + raise RuntimeError( + f"Unable to find large tensor named {tensor.name!r} " + f"with location {prop.value!r} in " + f"{sorted(self.large_initializers)}." 
+ ) + np_tensor = self.large_initializers[prop.value] + + if sys.byteorder == "big": + # Convert endian from little to big + tensor_bytes = np_tensor.byteswap().tobytes() + else: + tensor_bytes = np_tensor.tobytes() + if all_tensors_to_one_file: + _set_external_data( + tensor, + location=file_weight, + offset=offset, + length=len(tensor_bytes), + ) + offset += len(tensor_bytes) + with open(full_file_weight, "ab") as f: + f.write(tensor_bytes) + else: + name = f"{_clean_name(prefix, prop.value, unique_names)}.weight" + _set_external_data(tensor, location=name) + full_name = os.path.join(folder, name) + prop.value = name + with open(full_name, "wb") as f: + f.write(tensor_bytes) + + with open(file_path, "wb") as f: + f.write(copy.SerializeToString()) + return copy + + def save( + self, + file_path: str, + all_tensors_to_one_file: bool = False, + ) -> onnx.ModelProto: + """Save the large model. + The function returns a ModelProto, + the current one if the model did not need any modification, + a modified copy of it if it required changes such as giving file names + to every external tensor. + + Arguments: + file_path: model file + all_tensors_to_one_file: saves all large tensors in one file or + one file per lerge tensor + + Returns: + the saved ModelProto + """ + return self._save_external( + file_path, all_tensors_to_one_file=all_tensors_to_one_file + ) + + def load(self, file_path: str, load_large_initializers: bool = True): + """Load the large model. + + Arguments: + file_path: model file + load_large_initializers: loads the large initializers, + if not done, the model is incomplete but it can be used to + look into the model without executing it and method + :meth:`_load_large_initializers` can be used to load them later + """ + self.model_proto_ = onnx.load_model(file_path, load_external_data=False) + if load_large_initializers: + self._load_large_initializers(file_path) + + def _load_large_initializers(self, file_path): + """Loads large initializers. 
+ + Arguments: + file_path: model file, the weight are expected to be in the same folder as this file + """ + if self.model_proto_ is None: + raise RuntimeError("A model must be loaded before loading the weights.") + self.large_initializers = {} + base_dir = os.path.dirname(file_path) + for i, tensor in enumerate(ext_data._get_all_tensors(self.model_proto_)): + if not ext_data.uses_external_data(tensor): + continue + + info = ext_data.ExternalDataInfo(tensor) + external_data_file_path = c_checker._resolve_external_data_location( # type: ignore[attr-defined] + base_dir, info.location, tensor.name + ) + key = f"#t{i}" + _set_external_data(tensor, location=key) + + with open(external_data_file_path, "rb") as data_file: + if info.offset: + data_file.seek(info.offset) + + raw_data = ( + data_file.read(info.length) if info.length else data_file.read() + ) + + dtype = onnx.helper.tensor_dtype_to_np_dtype(tensor.data_type) + shape = tuple(tensor.dims) + + if sys.byteorder == "big": + np_tensor = ( + np.frombuffer(raw_data, dtype=dtype).byteswap().reshape(shape) + ) + else: + np_tensor = np.frombuffer(raw_data, dtype=dtype).reshape(shape) + + self.large_initializers[key] = np_tensor + + +def make_large_model( + graph: onnx.GraphProto, + large_initializers: dict[str, np.ndarray] | None = None, + **kwargs: Any, +) -> ModelContainer: + """Construct a ModelContainer + + C API and Python API of protobuf do not operate without serializing + the protos. This function uses the Python API of ModelContainer. 
+ + Arguments: + graph: *make_graph* returns + large_initializers: dictionary `name: large tensor`, + large tensor is any python object supporting the DLPack protocol, + the ownership the tensor is transferred to the ModelContainer, + the tensor must define method `tobytes` like numpy tensors + **kwargs: any attribute to add to the returned instance + + Returns: + ModelContainer + """ + model = onnx.helper.make_model(graph, **kwargs) + large_model = ModelContainer() + large_model.model_proto = model + if large_initializers: + large_model.set_large_initializers(large_initializers) + large_model.check_large_initializers() + return large_model diff --git a/pythonProject/.venv/Lib/site-packages/onnx/numpy_helper.py b/pythonProject/.venv/Lib/site-packages/onnx/numpy_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..3c7e49d9434c9eeffcd29016bc993e7136257ced --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/numpy_helper.py @@ -0,0 +1,812 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING, Any + +import ml_dtypes +import numpy as np +import numpy.typing as npt +import typing_extensions + +import onnx.external_data_helper +from onnx import helper, subbyte + +if TYPE_CHECKING: + from collections.abc import Sequence + +# System is little endian +_IS_LITTLE_ENDIAN = sys.byteorder == "little" + + +@typing_extensions.deprecated( + "Deprecated since 1.18. Scheduled to remove in 1.20. Consider using libraries like ml_dtypes for dtype conversion", + category=DeprecationWarning, +) +def bfloat16_to_float32( + data: np.int16 | np.int32 | np.ndarray, + dims: int | Sequence[int] | None = None, +) -> np.ndarray: + """Converts ndarray of bf16 (as uint32) to f32 (as uint32). + + Args: + data: A numpy array, empty dimensions are allowed if dims is + None. + dims: If specified, the function reshapes the results. 
+ + Returns: + A numpy array of float32 with the same dimension if dims is + None, or reshaped to dims if specified + """ + shift = lambda x: x << 16 # noqa: E731 + if dims is None: + if len(data.shape) == 0: + return shift(np.array([data]).astype(np.int32)).view(np.float32)[0] # type: ignore[no-any-return] + return shift(data.astype(np.int32)).view(np.float32) # type: ignore[no-any-return] + return shift(data.astype(np.int32)).reshape(dims).view(np.float32) # type: ignore[no-any-return] + + +def _float8e4m3_to_float32_scalar(ival: int, fn: bool, uz: bool) -> np.float32: + if not fn: + raise NotImplementedError("fn=False is not implemented.") + if ival < 0 or ival > 255: # noqa: PLR2004 + raise ValueError(f"{ival} is not a float8.") + if uz: + exponent_bias = 8 + if ival == 0x80: # noqa: PLR2004 + return np.nan # type: ignore[return-value] + else: + exponent_bias = 7 + if ival == 255: # noqa: PLR2004 + return np.float32(-np.nan) + if ival == 127: # noqa: PLR2004 + return np.float32(np.nan) + + ival = np.uint32(ival) # type: ignore[assignment] + expo = (ival & 0x78) >> 3 + mant = ival & 0x07 + sign = ival & 0x80 + res = sign << 24 + if expo == 0: + if mant > 0: + expo = 0x7F - exponent_bias + if mant & 0x4 == 0: + mant &= 0x3 + mant <<= 1 + expo -= 1 + if mant & 0x4 == 0: + mant &= 0x3 + mant <<= 1 + expo -= 1 + res |= (mant & 0x3) << 21 + res |= expo << 23 + else: + res |= mant << 20 + expo += 0x7F - exponent_bias + res |= expo << 23 + f = np.uint32(res).view(np.float32) + return f + + +_float8e4m3_to_float32 = np.vectorize( + _float8e4m3_to_float32_scalar, excluded=["fn", "uz"] +) + + +@typing_extensions.deprecated( + "Deprecated since 1.18. Scheduled to remove in 1.20. 
Consider using libraries like ml_dtypes for dtype conversion", + category=DeprecationWarning, +) +def float8e4m3_to_float32( + data: np.int16 | np.int32 | np.ndarray, + dims: int | Sequence[int] | None = None, + fn: bool = True, + uz: bool = False, +) -> np.ndarray: + """Converts ndarray of float8, e4m3 (as uint32) to f32 (as uint32). + + See :ref:`onnx-detail-float8` for technical details. + + Args: + data: A numpy array, empty dimensions are allowed if dims is None. + dims: If specified, the function reshapes the results. + fn: No infinite values. + uz: No negative zero. + + Returns: + A numpy array of float32 with the same dimension if dims is None, + or reshaped to dims if specified. + """ + if not fn: + raise NotImplementedError( + "float32_to_float8e4m3 not implemented with fn=False." + ) + res = _float8e4m3_to_float32(data, fn=fn, uz=uz) + if dims is None: + return res # type: ignore[no-any-return] + return res.reshape(dims) # type: ignore[no-any-return] + + +def _float8e5m2_to_float32_scalar(ival: int, fn: bool, uz: bool) -> np.float32: + if fn and uz: + if ival == 0x80: # noqa: PLR2004 + return np.float32(np.nan) + exponent_bias = 16 + elif not fn and not uz: + if ival in {253, 254, 255}: + return np.float32(-np.nan) + if ival in {125, 126, 127}: + return np.float32(np.nan) + if ival == 252: # noqa: PLR2004 + return np.float32(-np.inf) + if ival == 124: # noqa: PLR2004 + return np.float32(np.inf) + exponent_bias = 15 + else: + raise NotImplementedError("fn and uz must be both False or True.") + + ival = np.uint32(ival) # type: ignore[assignment] + expo = (ival & 0x7C) >> 2 + mant = ival & 0x03 + sign = ival & 0x80 + res = sign << 24 + if expo == 0: + if mant > 0: + expo = 0x7F - exponent_bias + if mant & 0x2 == 0: + mant &= 0x1 + mant <<= 1 + expo -= 1 + res |= (mant & 0x1) << 22 + res |= expo << 23 + else: + res |= mant << 21 + expo += 0x7F - exponent_bias + res |= expo << 23 + f = np.uint32(res).view(np.float32) + return f + + +_float8e5m2_to_float32 = 
np.vectorize( + _float8e5m2_to_float32_scalar, excluded=["fn", "uz"] +) + + +@typing_extensions.deprecated( + "Deprecated since 1.18. Scheduled to remove in 1.20. Consider using libraries like ml_dtypes for dtype conversion", + category=DeprecationWarning, +) +def float8e5m2_to_float32( + data: np.int16 | np.int32 | np.ndarray, + dims: int | Sequence[int] | None = None, + fn: bool = False, + uz: bool = False, +) -> np.ndarray: + """Converts ndarray of float8, e5m2 (as uint32) to f32 (as uint32). + + See :ref:`onnx-detail-float8` for technical details. + + Args: + data: A numpy array, empty dimensions are allowed if dims is None. + dims: If specified, the function reshapes the results. + fn: No infinite values. + uz: No negative zero. + + Returns: + A numpy array of float32 with the same dimension if dims is None, + or reshaped to dims if specified + """ + res = _float8e5m2_to_float32(data, fn=fn, uz=uz) + if dims is None: + return res # type: ignore[no-any-return] + return res.reshape(dims) # type: ignore[no-any-return] + + +def to_float8e8m0( + x: np.ndarray, + saturate: bool = True, + round_mode: str = "up", +) -> np.ndarray: + """Convert float32 NumPy array to float8e8m0 representation. If the input + is not a float32 array, it will be cast to one first. + + Args: + x: Input array to convert. + saturate: Whether to saturate at max/min float8e8m0 value. + round_mode: "nearest", "up", or "down". + + Returns: + np.ndarray: Array of ml_dtypes.float8_e8m0fnu values. 
+ """ + x_f32 = np.asarray(x, dtype=np.float32) + f_bits = x_f32.view(np.uint32) + + # Extract exponent bits + exponent = (f_bits >> 23) & 0xFF + exponent = exponent.astype( + np.uint16 + ) # use uint16 to prevent overflow during computation + + # Identify NaN or Inf + special_mask = exponent == 0xFF # noqa: PLR2004 + output = np.zeros_like(exponent, dtype=np.uint8) + output[special_mask] = 0xFF # Preserve NaN/Inf as max exponent + + # Process normal numbers + normal_mask = ~special_mask + + if round_mode == "nearest": + # Get guard, round, sticky, and least significant bits + g = ((f_bits & 0x400000) > 0).astype(np.uint8) + r = ((f_bits & 0x200000) > 0).astype(np.uint8) + s = ((f_bits & 0x1FFFFF) > 0).astype(np.uint8) + lsb = (exponent > 0).astype(np.uint8) + + round_up = (g == 1) & ((r == 1) | (s == 1) | (lsb == 1)) + + increment = np.zeros_like(exponent) + increment[round_up & normal_mask] = 1 + + if saturate: + max_mask = (exponent == 0xFE) & round_up & normal_mask # noqa: PLR2004 + increment[max_mask] = 0 # Don't overflow past max value + + exponent += increment + + elif round_mode == "up": + has_fraction = (f_bits & 0x4FFFFF) > 0 + round_up = has_fraction & normal_mask + + if saturate: + max_mask = (exponent == 0xFE) & round_up # noqa: PLR2004 + round_up[max_mask] = False + + exponent += round_up.astype(np.uint16) + + elif round_mode == "down": + pass # No rounding needed + + else: + raise ValueError(f"Unsupported rounding mode: {round_mode}") + + # Clip exponent to uint8 range + exponent = exponent.astype(np.uint8) + + output[normal_mask] = exponent[normal_mask] + + return output.view(ml_dtypes.float8_e8m0fnu) + + +@typing_extensions.deprecated( + "Deprecated since 1.18. Scheduled to remove in 1.20. 
Consider implementing your own unpack logic", + category=DeprecationWarning, +) +def unpack_int4( + data: np.int32 | np.ndarray, + dims: int | Sequence[int], + signed: bool, +) -> np.ndarray: + """Converts ndarray of int4 (as packed uint8) to f32 + See :ref:`onnx-detail-int4` for technical details. + + Args: + data: A numpy array, empty dimensions are allowed if dims is + None. + dims: The dimensions are used to reshape the unpacked buffer + signed: Whether the 4 bit integer is signed or unsigned + + Returns: + A numpy array of float32 reshaped to dims. + """ + single_func = lambda x: subbyte.unpack_single_4bitx2(x, signed) # noqa: E731 + func = np.frompyfunc(single_func, 1, 2) + + res_high, res_low = func(data.ravel()) + res = np.empty((res_high.size + res_low.size,), dtype=np.float32) + res[0::2] = res_high + res[1::2] = res_low + + if ( + res.size == np.prod(dims) + 1 + ): # handle single-element padding due to odd number of elements + res = res.ravel()[:-1] + res = res.reshape(dims) + return res + + +def _unpacked_float4e2m1_to_float32( + x: npt.NDArray[np.uint8], +) -> npt.NDArray[np.float32]: + """Evaluate the numerical value of an array of unpacked float4e2m1 values (as uint8) + See :ref:`onnx-detail-int4` for technical details. + + Args: + x: an array of uint8 elements representing a float4e2m1 (using the 4 LSB) + + Returns: + An array of float32 elements representing the values of the float4e2m1 input. + """ + # x is stored in 4 LSB of int + sign = np.where(np.bitwise_and(x, 0x08), -1, 1) + mantissa = (x & 0x01).astype(np.float32) + exponent = ((x & 0x06) >> 1).astype(np.float32) + + val = np.where( + exponent == 0, + sign * (mantissa / 2.0), + sign * (1.0 + mantissa / 2.0) * 2.0 ** (exponent - 1), + ) # denormalized, normalized + return val + + +def _unpack_4bit( + data: npt.NDArray[np.uint8], dims: Sequence[int] +) -> npt.NDArray[np.uint8]: + """Convert a packed uint4 array to unpacked uint4 array represented as uint8. + + Args: + data: A numpy array. 
+ dims: The dimensions are used to reshape the unpacked buffer. + + Returns: + A numpy array of int8/uint8 reshaped to dims. + """ + result = np.empty([data.size * 2], dtype=data.dtype) + array_low = data & np.uint8(0x0F) + array_high = data & np.uint8(0xF0) + array_high >>= np.uint8(4) + result[0::2] = array_low + result[1::2] = array_high + if result.size == np.prod(dims) + 1: + # handle single-element padding due to odd number of elements + result = result[:-1] + result.resize(dims, refcheck=False) + return result + + +def _pack_4bitx2(array: np.ndarray) -> npt.NDArray[np.uint8]: + """Convert a numpy array to flatten, packed int4/uint4. Elements must be in the correct range.""" + # Create a 1D copy + array_flat = array.ravel().view(np.uint8).copy() + size = array.size + odd_sized = size % 2 == 1 + if odd_sized: + array_flat.resize([size + 1], refcheck=False) + array_flat &= 0x0F + array_flat[1::2] <<= 4 + return array_flat[0::2] | array_flat[1::2] # type: ignore[return-type] + + +def to_array(tensor: onnx.TensorProto, base_dir: str = "") -> np.ndarray: # noqa: PLR0911 + """Converts a tensor def object to a numpy array. + + This function uses ml_dtypes if the dtype is not a native numpy dtype. + + Args: + tensor: a TensorProto object. + base_dir: if external tensor exists, base_dir can help to find the path to it + + Returns: + arr: the converted array. 
+ """ + if tensor.HasField("segment"): + raise ValueError("Currently not supporting loading segments.") + if tensor.data_type == onnx.TensorProto.UNDEFINED: + raise TypeError("The element type in the input tensor is UNDEFINED.") + + tensor_dtype = tensor.data_type + np_dtype = helper.tensor_dtype_to_np_dtype(tensor_dtype) + storage_np_dtype = helper.tensor_dtype_to_np_dtype( + helper.tensor_dtype_to_storage_tensor_dtype(tensor_dtype) + ) + storage_field = helper.tensor_dtype_to_field(tensor_dtype) + dims = tensor.dims + + if tensor.data_type == onnx.TensorProto.STRING: + utf8_strings = getattr(tensor, storage_field) + ss = [s.decode("utf-8") for s in utf8_strings] + return np.asarray(ss).astype(np_dtype).reshape(dims) + + # Load raw data from external tensor if it exists + if onnx.external_data_helper.uses_external_data(tensor): + onnx.external_data_helper.load_external_data_for_tensor(tensor, base_dir) + + if tensor.HasField("raw_data"): + # Raw_bytes support: using frombuffer. + raw_data = tensor.raw_data + if sys.byteorder == "big": + # Convert endian from little to big + raw_data = np.frombuffer(raw_data, dtype=np_dtype).byteswap().tobytes() + + if tensor_dtype in { + onnx.TensorProto.INT4, + onnx.TensorProto.UINT4, + onnx.TensorProto.FLOAT4E2M1, + }: + data = np.frombuffer(raw_data, dtype=np.uint8) + return _unpack_4bit(data, dims).view(np_dtype) + + return np.frombuffer(raw_data, dtype=np_dtype).reshape(dims) + + if tensor_dtype in { + onnx.TensorProto.BFLOAT16, + onnx.TensorProto.FLOAT16, + onnx.TensorProto.INT16, + onnx.TensorProto.UINT16, + }: + return ( + np.array(tensor.int32_data, dtype=np.int32) + .view(np.uint32) + .astype(np.uint16) + .reshape(dims) + .view(np_dtype) + ) + + if tensor_dtype in { + onnx.TensorProto.FLOAT8E4M3FN, + onnx.TensorProto.FLOAT8E4M3FNUZ, + onnx.TensorProto.FLOAT8E5M2, + onnx.TensorProto.FLOAT8E5M2FNUZ, + onnx.TensorProto.FLOAT8E8M0, + onnx.TensorProto.BOOL, + }: + return ( + np.array(tensor.int32_data, dtype=np.int32) + 
.view(np.uint32) + .astype(np.uint8) + .view(np_dtype) + .reshape(dims) + ) + + if tensor_dtype in { + onnx.TensorProto.UINT4, + onnx.TensorProto.INT4, + onnx.TensorProto.FLOAT4E2M1, + }: + data = ( + np.array(tensor.int32_data, dtype=np.int32).view(np.uint32).astype(np.uint8) + ) + return _unpack_4bit(data, dims).view(np_dtype) + + data = getattr(tensor, storage_field) + if tensor_dtype in (onnx.TensorProto.COMPLEX64, onnx.TensorProto.COMPLEX128): + return np.array(data, dtype=storage_np_dtype).view(dtype=np_dtype).reshape(dims) + + return np.asarray(data, dtype=storage_np_dtype).astype(np_dtype).reshape(dims) + + +def from_array(array: np.ndarray, /, name: str | None = None) -> onnx.TensorProto: + """Converts an array into a TensorProto including + + Args: + array: a numpy array. + name: (optional) the name of the tensor. + + Returns: + TensorProto: the converted tensor def. + """ + tensor = onnx.TensorProto() + tensor.dims.extend(array.shape) + if name: + tensor.name = name + if array.dtype == object or np.issubdtype(array.dtype, np.str_): + # Special care for strings. + tensor.data_type = onnx.TensorProto.STRING + # TODO: Introduce full string support. + # We flatten the array in case there are n-D arrays are specified + # If you want more complex shapes then follow the below instructions. 
+ # Unlike other types where the shape is automatically inferred from + # nested arrays of values, the only reliable way now to feed strings + # is to put them into a flat array then specify type astype(object) + # (otherwise all strings may have different types depending on their length) + # and then specify shape .reshape([x, y, z]) + flat_array = array.flatten() + for e in flat_array: + if isinstance(e, str): + tensor.string_data.append(e.encode("utf-8")) + elif isinstance(e, bytes): + tensor.string_data.append(e) + else: + raise NotImplementedError( + "Unrecognized object in the object array, expect a string, or array of bytes: ", + str(type(e)), + ) + return tensor + + dtype = helper.np_dtype_to_tensor_dtype(array.dtype) + if dtype in { + onnx.TensorProto.INT4, + onnx.TensorProto.UINT4, + onnx.TensorProto.FLOAT4E2M1, + }: + # Pack the array into int4 + array = _pack_4bitx2(array) + if not _IS_LITTLE_ENDIAN: + array = array.view(array.dtype.newbyteorder("<")) + + tensor.raw_data = array.tobytes() + tensor.data_type = dtype + return tensor + + +def to_list(sequence: onnx.SequenceProto) -> list[Any]: + """Converts a sequence def to a Python list. + + Args: + sequence: a SequenceProto object. + + Returns: + list: the converted list. + """ + elem_type = sequence.elem_type + if elem_type == onnx.SequenceProto.TENSOR: + return [to_array(v) for v in sequence.tensor_values] + if elem_type == onnx.SequenceProto.SPARSE_TENSOR: + return [to_array(v) for v in sequence.sparse_tensor_values] # type: ignore[arg-type] + if elem_type == onnx.SequenceProto.SEQUENCE: + return [to_list(v) for v in sequence.sequence_values] + if elem_type == onnx.SequenceProto.MAP: + return [to_dict(v) for v in sequence.map_values] + raise TypeError("The element type in the input sequence is not supported.") + + +def from_list( + lst: list[Any], name: str | None = None, dtype: int | None = None +) -> onnx.SequenceProto: + """Converts a list into a sequence def. 
+ + Args: + lst: a Python list + name: (optional) the name of the sequence. + dtype: (optional) type of element in the input list, used for specifying + sequence values when converting an empty list. + + Returns: + SequenceProto: the converted sequence def. + """ + sequence = onnx.SequenceProto() + if name: + sequence.name = name + + if dtype: + elem_type = dtype + elif len(lst) > 0: + first_elem = lst[0] + if isinstance(first_elem, dict): + elem_type = onnx.SequenceProto.MAP + elif isinstance(first_elem, list): + elem_type = onnx.SequenceProto.SEQUENCE + else: + elem_type = onnx.SequenceProto.TENSOR + else: + # if empty input list and no dtype specified + # choose sequence of tensors on default + elem_type = onnx.SequenceProto.TENSOR + sequence.elem_type = elem_type + + if (len(lst) > 0) and not all(isinstance(elem, type(lst[0])) for elem in lst): + raise TypeError( + "The element type in the input list is not the same " + "for all elements and therefore is not supported as a sequence." + ) + + if elem_type == onnx.SequenceProto.TENSOR: + for tensor in lst: + sequence.tensor_values.extend([from_array(np.asarray(tensor))]) + elif elem_type == onnx.SequenceProto.SEQUENCE: + for seq in lst: + sequence.sequence_values.extend([from_list(seq)]) + elif elem_type == onnx.SequenceProto.MAP: + for mapping in lst: + sequence.map_values.extend([from_dict(mapping)]) + else: + raise TypeError( + "The element type in the input list is not a tensor, " + "sequence, or map and is not supported." + ) + return sequence + + +def to_dict(map_proto: onnx.MapProto) -> dict[Any, Any]: + """Converts a map def to a Python dictionary. + + Args: + map_proto: a MapProto object. + + Returns: + The converted dictionary. 
+ """ + key_list: list[Any] = [] + if map_proto.key_type == onnx.TensorProto.STRING: + key_list = list(map_proto.string_keys) + else: + key_list = list(map_proto.keys) + + value_list = to_list(map_proto.values) + if len(key_list) != len(value_list): + raise IndexError( + "Length of keys and values for MapProto (map name: ", + map_proto.name, + ") are not the same.", + ) + dictionary = dict(zip(key_list, value_list)) + return dictionary + + +def from_dict(dict_: dict[Any, Any], name: str | None = None) -> onnx.MapProto: + """Converts a Python dictionary into a map def. + + Args: + dict_: Python dictionary + name: (optional) the name of the map. + + Returns: + MapProto: the converted map def. + """ + map_proto = onnx.MapProto() + if name: + map_proto.name = name + keys = list(dict_) + raw_key_type = np.result_type(keys[0]) + key_type = helper.np_dtype_to_tensor_dtype(raw_key_type) + + valid_key_int_types = { + onnx.TensorProto.INT8, + onnx.TensorProto.INT16, + onnx.TensorProto.INT32, + onnx.TensorProto.INT64, + onnx.TensorProto.UINT8, + onnx.TensorProto.UINT16, + onnx.TensorProto.UINT32, + onnx.TensorProto.UINT64, + } + + if not (all(np.result_type(key) == raw_key_type for key in keys)): + raise TypeError( + "The key type in the input dictionary is not the same " + "for all keys and therefore is not valid as a map." + ) + + values = list(dict_.values()) + raw_value_type = np.result_type(values[0]) + if not all(np.result_type(val) == raw_value_type for val in values): + raise TypeError( + "The value type in the input dictionary is not the same " + "for all values and therefore is not valid as a map." 
+ ) + + value_seq = from_list(values) + + map_proto.key_type = key_type + if key_type == onnx.TensorProto.STRING: + map_proto.string_keys.extend(keys) + elif key_type in valid_key_int_types: + map_proto.keys.extend(keys) + map_proto.values.CopyFrom(value_seq) + return map_proto + + +def to_optional(optional: onnx.OptionalProto) -> Any | None: + """Converts an optional def to a Python optional. + + Args: + optional: an OptionalProto object. + + Returns: + opt: the converted optional. + """ + elem_type = optional.elem_type + if elem_type == onnx.OptionalProto.UNDEFINED: + return None + if elem_type == onnx.OptionalProto.TENSOR: + return to_array(optional.tensor_value) + if elem_type == onnx.OptionalProto.SPARSE_TENSOR: + return to_array(optional.sparse_tensor_value) # type: ignore[arg-type] + if elem_type == onnx.OptionalProto.SEQUENCE: + return to_list(optional.sequence_value) + if elem_type == onnx.OptionalProto.MAP: + return to_dict(optional.map_value) + if elem_type == onnx.OptionalProto.OPTIONAL: + return to_optional(optional.optional_value) + raise TypeError("The element type in the input optional is not supported.") + + +def from_optional( + opt: Any | None, name: str | None = None, dtype: int | None = None +) -> onnx.OptionalProto: + """Converts an optional value into a Optional def. + + Args: + opt: a Python optional + name: (optional) the name of the optional. + dtype: (optional) type of element in the input, used for specifying + optional values when converting empty none. dtype must + be a valid OptionalProto.DataType value + + Returns: + optional: the converted optional def. 
+ """ + # TODO: create a map and replace conditional branches + optional = onnx.OptionalProto() + if name: + optional.name = name + + if dtype is not None: + # dtype must be a valid onnx.OptionalProto.DataType + if dtype not in onnx.OptionalProto.DataType.values(): + raise TypeError(f"{dtype} must be a valid OptionalProto.DataType.") + elem_type = dtype + elif isinstance(opt, dict): + elem_type = onnx.OptionalProto.MAP + elif isinstance(opt, list): + elem_type = onnx.OptionalProto.SEQUENCE + elif opt is None: + elem_type = onnx.OptionalProto.UNDEFINED + else: + elem_type = onnx.OptionalProto.TENSOR + + optional.elem_type = elem_type + + if opt is not None: + if elem_type == onnx.OptionalProto.TENSOR: + optional.tensor_value.CopyFrom(from_array(opt)) + elif elem_type == onnx.OptionalProto.SEQUENCE: + optional.sequence_value.CopyFrom(from_list(opt)) + elif elem_type == onnx.OptionalProto.MAP: + optional.map_value.CopyFrom(from_dict(opt)) + else: + raise TypeError( + "The element type in the input is not a tensor, " + "sequence, or map and is not supported." + ) + return optional + + +def create_random_int( + input_shape: tuple[int], dtype: np.dtype, seed: int = 1 +) -> np.ndarray: + """Create random integer array for backend/test/case/node. + + Args: + input_shape: The shape for the returned integer array. + dtype: The NumPy data type for the returned integer array. + seed: The seed for np.random. + + Returns: + np.ndarray: Random integer array. 
+ """ + np.random.seed(seed) + if dtype in ( + np.uint8, + np.uint16, + np.uint32, + np.uint64, + np.int8, + np.int16, + np.int32, + np.int64, + ): + # the range of np.random.randint is int32; set a fixed boundary if overflow + end = min(np.iinfo(dtype).max, np.iinfo(np.int32).max) + start = max(np.iinfo(dtype).min, np.iinfo(np.int32).min) + return np.random.randint(start, end, size=input_shape).astype(dtype) + else: + raise TypeError(f"{dtype} is not supported by create_random_int.") + + +def saturate_cast(x: np.ndarray, dtype: np.dtype) -> np.ndarray: + """Saturate cast for numeric types. + + This function ensures that values outside the representable range + of the target dtype are clamped to the maximum or minimum representable + value of that dtype. + """ + if np.issubdtype(dtype, np.integer) or dtype in (ml_dtypes.int4, ml_dtypes.uint4): + info = ml_dtypes.iinfo(dtype) + x = np.round(x) + else: + info = ml_dtypes.finfo(dtype) # type: ignore[assignment] + + return np.clip(x, info.min, info.max).astype(dtype) diff --git a/pythonProject/.venv/Lib/site-packages/onnx/onnx-data.in.proto b/pythonProject/.venv/Lib/site-packages/onnx/onnx-data.in.proto new file mode 100644 index 0000000000000000000000000000000000000000..ec0f27b23c86851de3f5fdb6bb35501bd5c9ca52 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/onnx-data.in.proto @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: Apache-2.0 + + +syntax = "proto2"; + +package {PACKAGE_NAME}; +// #if ONNX-ML +import "onnx/onnx-ml.proto"; +// #else +import "onnx/onnx.proto"; +// #endif + +// This file contains the proto definitions for MapProto and +// SequenceProto. These protos are used to represent the data structures +// of maps and sequence for use in test data or ModelProto. + +// Sequences +// +// Defines a dense, ordered, collection of elements that are of homogeneous types. +// Sequences can be made out of tensors, maps, or sequences. 
+// +// If a sequence is made out of tensors, the tensors must have the same element +// type (i.e. int32). In some cases, the tensors in a sequence can have different +// shapes. Whether the tensors can have different shapes or not depends on the +// type/shape associated with the corresponding "ValueInfo". For example, +// "Sequence" means that all tensors have same shape. However, +// "Sequence" means they can have different +// shapes (all of rank 2), where "omitted" means the corresponding dimension has +// no symbolic/constant value. Finally, "Sequence>" means +// that the different tensors can have different ranks, when the "shape" itself +// is omitted from the tensor-type. For a more complete description, refer to +// https://github.com/onnx/onnx/blob/main/docs/IR.md#static-tensor-shapes. +// +message SequenceProto { + + optional string name = 1; + + enum DataType { + UNDEFINED = 0; + TENSOR = 1; + SPARSE_TENSOR = 2; + SEQUENCE = 3; + MAP = 4; + OPTIONAL = 5; + } + + // The data type of the element. + // This field MUST have a valid SequenceProto.DataType value + optional int32 elem_type = 2; + + // For TensorProto values. + // When this field is present, the elem_type field MUST be TENSOR. + repeated TensorProto tensor_values = 3; + + // For SparseTensorProto values. + // When this field is present, the elem_type field MUST be SPARSE_TENSOR. + repeated SparseTensorProto sparse_tensor_values = 4; + + // For SequenceProto values, allowing sequences to be of themselves. + // When this field is present, the elem_type field MUST be SEQUENCE. + repeated SequenceProto sequence_values = 5; + + // For MapProto values. + // When this field is present, the elem_type field MUST be MAP. + repeated MapProto map_values = 6; + + // For OptionalProto values. + // When this field is present, the elem_type field MUST be Optional. + repeated OptionalProto optional_values = 7; + +} + + +// Maps +// +// Specifies an associative table, defined by keys and values. 
+// MapProto is formed with a repeated field of keys (of type INT8, INT16, INT32, +// INT64, UINT8, UINT16, UINT32, UINT64, or STRING) and values (of type TENSOR, +// SPARSE_TENSOR, SEQUENCE, or MAP). Key types and value types have to remain +// the same throughout the instantiation of the MapProto. +// +message MapProto { + + optional string name = 1; + + // All MapProto data types must have the same length of keys and values. + + // The data type of the key. + // This field MUST have a valid TensorProto.DataType value of + // INT8, INT16, INT32, INT64, UINT8, UINT16, UINT32, UINT64, or STRING + optional int32 key_type = 2; + + // Every element of keys has to be one of the following data types + // INT8, INT16, INT32, INT64, UINT8, UINT16, UINT32, UINT64, or STRING. + // The integer cases are represented by the repeated int64 field keys below. + repeated int64 keys = 3; + + // If keys are strings, they are represented by the repeated bytes field + // string_keys below. + repeated bytes string_keys = 4; + + // MapProto values are represented in a SequenceProto of the same length as the + // repeated keys field and have to be one of the following data types + // TENSOR, SPARSE_TENSOR, MAP, SEQUENCE. + optional SequenceProto values = 5; +} + +// Optional +// +// +message OptionalProto { + + optional string name = 1; + + enum DataType { + UNDEFINED = 0; + TENSOR = 1; + SPARSE_TENSOR = 2; + SEQUENCE = 3; + MAP = 4; + OPTIONAL = 5; + } + + // The data type of the element, identifies if the OptionalProto value + // is Tensor, Sparse Tensor, Sequence, Map, or Optional. + // The type of the optional value MUST match the elem_type specified. + // This field MUST have a valid OptionalProto.DataType value. + optional int32 elem_type = 2; + + // For TensorProto value. + // When this field is present, the elem_type field MUST be TENSOR. + optional TensorProto tensor_value = 3; + + // For SparseTensorProto value. 
+ // When this field is present, the elem_type field MUST be SPARSE_TENSOR. + optional SparseTensorProto sparse_tensor_value = 4; + + // For SequenceProto value. + // When this field is present, the elem_type field MUST be SEQUENCE. + optional SequenceProto sequence_value = 5; + + // For MapProto value. + // When this field is present, the elem_type field MUST be MAP. + optional MapProto map_value = 6; + + // For OptionalProto value, allowing optional to be of itself (completeness) + // When this field is present, the elem_type field MUST be OPTIONAL. + optional OptionalProto optional_value = 7; + +} diff --git a/pythonProject/.venv/Lib/site-packages/onnx/onnx-data.proto b/pythonProject/.venv/Lib/site-packages/onnx/onnx-data.proto new file mode 100644 index 0000000000000000000000000000000000000000..61bba91732db50b403ff5609c152782a51573984 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/onnx-data.proto @@ -0,0 +1,155 @@ +// +// WARNING: This file is automatically generated! Please edit onnx.in.proto. +// + + +// SPDX-License-Identifier: Apache-2.0 + + +syntax = "proto2"; + +package onnx; +import "onnx/onnx-ml.proto"; + +// This file contains the proto definitions for MapProto and +// SequenceProto. These protos are used to represent the data structures +// of maps and sequence for use in test data or ModelProto. + +// Sequences +// +// Defines a dense, ordered, collection of elements that are of homogeneous types. +// Sequences can be made out of tensors, maps, or sequences. +// +// If a sequence is made out of tensors, the tensors must have the same element +// type (i.e. int32). In some cases, the tensors in a sequence can have different +// shapes. Whether the tensors can have different shapes or not depends on the +// type/shape associated with the corresponding "ValueInfo". For example, +// "Sequence" means that all tensors have same shape. 
However, +// "Sequence" means they can have different +// shapes (all of rank 2), where "omitted" means the corresponding dimension has +// no symbolic/constant value. Finally, "Sequence>" means +// that the different tensors can have different ranks, when the "shape" itself +// is omitted from the tensor-type. For a more complete description, refer to +// https://github.com/onnx/onnx/blob/main/docs/IR.md#static-tensor-shapes. +// +message SequenceProto { + + optional string name = 1; + + enum DataType { + UNDEFINED = 0; + TENSOR = 1; + SPARSE_TENSOR = 2; + SEQUENCE = 3; + MAP = 4; + OPTIONAL = 5; + } + + // The data type of the element. + // This field MUST have a valid SequenceProto.DataType value + optional int32 elem_type = 2; + + // For TensorProto values. + // When this field is present, the elem_type field MUST be TENSOR. + repeated TensorProto tensor_values = 3; + + // For SparseTensorProto values. + // When this field is present, the elem_type field MUST be SPARSE_TENSOR. + repeated SparseTensorProto sparse_tensor_values = 4; + + // For SequenceProto values, allowing sequences to be of themselves. + // When this field is present, the elem_type field MUST be SEQUENCE. + repeated SequenceProto sequence_values = 5; + + // For MapProto values. + // When this field is present, the elem_type field MUST be MAP. + repeated MapProto map_values = 6; + + // For OptionalProto values. + // When this field is present, the elem_type field MUST be Optional. + repeated OptionalProto optional_values = 7; + +} + + +// Maps +// +// Specifies an associative table, defined by keys and values. +// MapProto is formed with a repeated field of keys (of type INT8, INT16, INT32, +// INT64, UINT8, UINT16, UINT32, UINT64, or STRING) and values (of type TENSOR, +// SPARSE_TENSOR, SEQUENCE, or MAP). Key types and value types have to remain +// the same throughout the instantiation of the MapProto. 
+// +message MapProto { + + optional string name = 1; + + // All MapProto data types must have the same length of keys and values. + + // The data type of the key. + // This field MUST have a valid TensorProto.DataType value of + // INT8, INT16, INT32, INT64, UINT8, UINT16, UINT32, UINT64, or STRING + optional int32 key_type = 2; + + // Every element of keys has to be one of the following data types + // INT8, INT16, INT32, INT64, UINT8, UINT16, UINT32, UINT64, or STRING. + // The integer cases are represented by the repeated int64 field keys below. + repeated int64 keys = 3; + + // If keys are strings, they are represented by the repeated bytes field + // string_keys below. + repeated bytes string_keys = 4; + + // MapProto values are represented in a SequenceProto of the same length as the + // repeated keys field and have to be one of the following data types + // TENSOR, SPARSE_TENSOR, MAP, SEQUENCE. + optional SequenceProto values = 5; +} + +// Optional +// +// +message OptionalProto { + + optional string name = 1; + + enum DataType { + UNDEFINED = 0; + TENSOR = 1; + SPARSE_TENSOR = 2; + SEQUENCE = 3; + MAP = 4; + OPTIONAL = 5; + } + + // The data type of the element, identifies if the OptionalProto value + // is Tensor, Sparse Tensor, Sequence, Map, or Optional. + // The type of the optional value MUST match the elem_type specified. + // This field MUST have a valid OptionalProto.DataType value. + optional int32 elem_type = 2; + + // For TensorProto value. + // When this field is present, the elem_type field MUST be TENSOR. + optional TensorProto tensor_value = 3; + + // For SparseTensorProto value. + // When this field is present, the elem_type field MUST be SPARSE_TENSOR. + optional SparseTensorProto sparse_tensor_value = 4; + + // For SequenceProto value. + // When this field is present, the elem_type field MUST be SEQUENCE. + optional SequenceProto sequence_value = 5; + + // For MapProto value. 
+ // When this field is present, the elem_type field MUST be MAP. + optional MapProto map_value = 6; + + // For OptionalProto value, allowing optional to be of itself (completeness) + // When this field is present, the elem_type field MUST be OPTIONAL. + optional OptionalProto optional_value = 7; + +} + +// For using protobuf-lite +option optimize_for = LITE_RUNTIME; + diff --git a/pythonProject/.venv/Lib/site-packages/onnx/onnx-ml.proto b/pythonProject/.venv/Lib/site-packages/onnx/onnx-ml.proto new file mode 100644 index 0000000000000000000000000000000000000000..a6fd805587d1e45d4b25b439d46ed4b98260263e --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/onnx-ml.proto @@ -0,0 +1,1002 @@ +// +// WARNING: This file is automatically generated! Please edit onnx.in.proto. +// + + +// SPDX-License-Identifier: Apache-2.0 + + +syntax = "proto2"; + +package onnx; + +// Overview +// +// ONNX is an open specification that is comprised of the following components: +// +// 1) A definition of an extensible computation graph model. +// 2) Definitions of standard data types. +// 3) Definitions of built-in operators. +// +// This document describes the syntax of models and their computation graphs, +// as well as the standard data types. Together, they are referred to as the ONNX +// Intermediate Representation, or 'IR' for short. +// +// The normative semantic specification of the ONNX IR is found in docs/IR.md. +// Definitions of the built-in neural network operators may be found in docs/Operators.md. +// Definitions of the built-in classical machine learning operators may be found in +// docs/Operators-ml.md. + +// Notes +// +// Protobuf compatibility +// +// To simplify framework compatibility, ONNX is defined using the subset of protobuf +// that is compatible with both protobuf v2 and v3. This means that we do not use any +// protobuf features that are only available in one of the two versions. 
+// +// Here are the most notable contortions we have to carry out to work around +// these limitations: +// +// - No 'map' (added protobuf 3.0). We instead represent mappings as lists +// of key-value pairs, where order does not matter and duplicates +// are not allowed. + + +// Versioning +// +// ONNX versioning is specified in docs/IR.md and elaborated on in docs/Versioning.md +// +// To be compatible with both proto2 and proto3, we will use a version number +// that is not defined by the default value but an explicit enum number. +enum Version { + // proto3 requires the first enum value to be zero. + // We add this just to appease the compiler. + _START_VERSION = 0; + // The version field is always serialized and we will use it to store the + // version that the graph is generated from. This helps us set up version + // control. + // For the IR, we are using simple numbers starting with 0x00000001, + // which was the version we published on Oct 10, 2017. + IR_VERSION_2017_10_10 = 0x0000000000000001; + + // IR_VERSION 2 published on Oct 30, 2017 + // - Added type discriminator to AttributeProto to support proto3 users + IR_VERSION_2017_10_30 = 0x0000000000000002; + + // IR VERSION 3 published on Nov 3, 2017 + // - For operator versioning: + // - Added new message OperatorSetIdProto + // - Added opset_import in ModelProto + // - For vendor extensions, added domain in NodeProto + IR_VERSION_2017_11_3 = 0x0000000000000003; + + // IR VERSION 4 published on Jan 22, 2019 + // - Relax constraint that initializers should be a subset of graph inputs + // - Add type BFLOAT16 + IR_VERSION_2019_1_22 = 0x0000000000000004; + + // IR VERSION 5 published on March 18, 2019 + // - Add message TensorAnnotation. + // - Add quantization annotation in GraphProto to map tensor with its scale and zero point quantization parameters. + IR_VERSION_2019_3_18 = 0x0000000000000005; + + // IR VERSION 6 published on Sep 19, 2019 + // - Add support for sparse tensor constants stored in model. 
+ // - Add message SparseTensorProto + // - Add sparse initializers + IR_VERSION_2019_9_19 = 0x0000000000000006; + + // IR VERSION 7 published on May 8, 2020 + // - Add support to allow function body graph to rely on multiple external operator sets. + // - Add a list to promote inference graph's initializers to global and + // mutable variables. Global variables are visible in all graphs of the + // stored models. + // - Add message TrainingInfoProto to store initialization + // method and training algorithm. The execution of TrainingInfoProto + // can modify the values of mutable variables. + // - Implicitly add inference graph into each TrainingInfoProto's algorithm. + IR_VERSION_2020_5_8 = 0x0000000000000007; + + // IR VERSION 8 published on July 30, 2021 + // Introduce TypeProto.SparseTensor + // Introduce TypeProto.Optional + // Added a list of FunctionProtos local to the model + // Deprecated since_version and operator status from FunctionProto + IR_VERSION_2021_7_30 = 0x0000000000000008; + + // IR VERSION 9 published on May 5, 2023 + // Added AttributeProto to FunctionProto so that default attribute values can be set. + // Added FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ. + IR_VERSION_2023_5_5 = 0x0000000000000009; + + // IR VERSION 10 published on March 25, 2024 + // Added UINT4, INT4, overload field for functions and metadata_props on multiple proto definitions. + IR_VERSION_2024_3_25 = 0x000000000000000A; + + // IR VERSION 11 published on May 12, 2025 + // Added FLOAT4E2M1, multi-device protobuf classes. + IR_VERSION_2025_05_12 = 0x000000000000000B; + + // IR VERSION 12 published on TBD + // Added FLOAT8E8M0. + IR_VERSION = 0x000000000000000C; +} + +// Attributes +// +// A named attribute containing either singular float, integer, string, graph, +// and tensor values, or repeated float, integer, string, graph, and tensor values. 
+// An AttributeProto MUST contain the name field, and *only one* of the +// following content fields, effectively enforcing a C/C++ union equivalent. +message AttributeProto { + reserved 12, 16 to 19; + reserved "v"; + + // Note: this enum is structurally identical to the OpSchema::AttrType + // enum defined in schema.h. If you rev one, you likely need to rev the other. + enum AttributeType { + UNDEFINED = 0; + FLOAT = 1; + INT = 2; + STRING = 3; + TENSOR = 4; + GRAPH = 5; + SPARSE_TENSOR = 11; + TYPE_PROTO = 13; + + FLOATS = 6; + INTS = 7; + STRINGS = 8; + TENSORS = 9; + GRAPHS = 10; + SPARSE_TENSORS = 12; + TYPE_PROTOS = 14; + } + + // The name field MUST be present for this version of the IR. + optional string name = 1; // namespace Attribute + + // if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function. + // In this case, this AttributeProto does not contain data, and it's a reference of attribute + // in parent scope. + // NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph. + optional string ref_attr_name = 21; + + // A human-readable documentation for this attribute. Markdown is allowed. + optional string doc_string = 13; + + // The type field MUST be present for this version of the IR. + // For 0.0.1 versions of the IR, this field was not defined, and + // implementations needed to use has_field heuristics to determine + // which value field was in use. For IR_VERSION 0.0.2 or later, this + // field MUST be set and match the f|i|s|t|... field in use. This + // change was made to accommodate proto3 implementations. 
+ optional AttributeType type = 20; // discriminator that indicates which field below is in use + + // Exactly ONE of the following fields must be present for this version of the IR + optional float f = 2; // float + optional int64 i = 3; // int + optional bytes s = 4; // UTF-8 string + optional TensorProto t = 5; // tensor value + optional GraphProto g = 6; // graph + optional SparseTensorProto sparse_tensor = 22; // sparse tensor value + // Do not use field below, it's deprecated. + // optional ValueProto v = 12; // value - subsumes everything but graph + optional TypeProto tp = 14; // type proto + + repeated float floats = 7; // list of floats + repeated int64 ints = 8; // list of ints + repeated bytes strings = 9; // list of UTF-8 strings + repeated TensorProto tensors = 10; // list of tensors + repeated GraphProto graphs = 11; // list of graph + repeated SparseTensorProto sparse_tensors = 23; // list of sparse tensors + repeated TypeProto type_protos = 15;// list of type protos +} + +// Defines information on value, including the name, the type, and +// the shape of the value. +message ValueInfoProto { + // This field MUST be present in this version of the IR. + optional string name = 1; // namespace Value + // This field MUST be present in this version of the IR for + // inputs and outputs of the top-level graph. + optional TypeProto type = 2; + // A human-readable documentation for this value. Markdown is allowed. + optional string doc_string = 3; + // Named metadata values; keys should be distinct. + repeated StringStringEntryProto metadata_props = 4; +} + +// Nodes +// +// Computation graphs are made up of a DAG of nodes, which represent what is +// commonly called a "layer" or "pipeline stage" in machine learning frameworks. +// +// For example, it can be a node of type "Conv" that takes in an image, a filter +// tensor and a bias tensor, and produces the convolved output. 
+message NodeProto { + repeated string input = 1; // namespace Value + repeated string output = 2; // namespace Value + + // An optional identifier for this node in a graph. + // This field MAY be absent in this version of the IR. + optional string name = 3; // namespace Node + + // The symbolic identifier of the Operator to execute. + optional string op_type = 4; // namespace Operator + // The domain of the OperatorSet that specifies the operator named by op_type. + optional string domain = 7; // namespace Domain + // Overload identifier, used only to map this to a model-local function. + optional string overload = 8; + + // Additional named attributes. + repeated AttributeProto attribute = 5; + + // A human-readable documentation for this node. Markdown is allowed. + optional string doc_string = 6; + + // Named metadata values; keys should be distinct. + repeated StringStringEntryProto metadata_props = 9; + + // Configuration of multi-device annotations. + repeated NodeDeviceConfigurationProto device_configurations = 10; +} + +// IntIntListEntryProto follows the pattern for cross-proto-version maps. +// See https://developers.google.com/protocol-buffers/docs/proto3#maps +message IntIntListEntryProto { + optional int64 key = 1; + repeated int64 value = 2; +}; + +// Multi-device configuration proto for NodeProto. +message NodeDeviceConfigurationProto { + // This field MUST be present for this version of the IR. + // ID of the configuration. MUST match the name of a DeviceConfigurationProto. + optional string configuration_id = 1; + // Sharding spec for the node. + repeated ShardingSpecProto sharding_spec = 2; + // Pipeline stage of this node. + optional int32 pipeline_stage = 3; +} + +// ShardingSpecProto: This describes the sharding spec for a specific +// input or output tensor of a node. +message ShardingSpecProto { + // This field MUST be present for this version of the IR. + // Identifies the input or output of the node that is being sharded. 
+ // Required to match a name specified in the node's input or output list of ValueInfoProtos. + // It is called `logical tensor` in subsequent descriptions. + optional string tensor_name = 1; + + // The following is the list of devices across which the logical + // tensor is sharded or replicated. + repeated int64 device = 2; + + // Each element v in above field devices may represent either a + // device or a set of devices (when we want the same shard/tensor + // to be replicated across a subset of devices), as indicated by + // the following optional map. If the map contains an entry for v, + // then v represents a device group, and the map indicates the set + // of devices in that group. + repeated IntIntListEntryProto index_to_device_group_map = 3; + + // The following is the sharded-shape of the tensor, consisting of + // the sharding-spec for each axis of the tensor. + repeated ShardedDimProto sharded_dim = 4; +} + +// ShardedDimProto: This describes the sharding spec for a single +// axis of a sharded tensor. +message ShardedDimProto { + // This field MUST be present for this version of the IR. + // The axis this sharding corresponds to. Must be in the range of + // [-r, r - 1], where r is the rank of the tensor. Negative axis values means + // counting from the back. + optional int64 axis = 1; + + // Describes how the tensor on the provided axis is sharded. + // The common-case is described by a single instance of SimpleShardedDimProto. + // Multiple instances can be used to handle cases where a sharded + // tensor is reshaped, fusing multiple axes into one. + repeated SimpleShardedDimProto simple_sharding = 2; +} + +// SimpleShardedDimProto: Indicates that N blocks are divided into M shards. +// N is allowed to be symbolic where M is required to be a constant. +message SimpleShardedDimProto { + // Dimension value to be sharded. + oneof dim { + int64 dim_value = 1; + string dim_param = 2; + } + + // This field MUST be present for this version of the IR. 
+ // Number of shards to split dim into. + optional int64 num_shards = 3; +} + +// Training information +// TrainingInfoProto stores information for training a model. +// In particular, this defines two functionalities: an initialization-step +// and a training-algorithm-step. Initialization resets the model +// back to its original state as if no training has been performed. +// Training algorithm improves the model based on input data. +// +// The semantics of the initialization-step is that the initializers +// in ModelProto.graph and in TrainingInfoProto.algorithm are first +// initialized as specified by the initializers in the graph, and then +// updated by the "initialization_binding" in every instance in +// ModelProto.training_info. +// +// The field "algorithm" defines a computation graph which represents a +// training algorithm's step. After the execution of a +// TrainingInfoProto.algorithm, the initializers specified by "update_binding" +// may be immediately updated. If the targeted training algorithm contains +// consecutive update steps (such as block coordinate descent methods), +// the user needs to create a TrainingInfoProto for each step. +message TrainingInfoProto { + // This field describes a graph to compute the initial tensors + // upon starting the training process. Initialization graph has no input + // and can have multiple outputs. Usually, trainable tensors in neural + // networks are randomly initialized. To achieve that, for each tensor, + // the user can put a random number operator such as RandomNormal or + // RandomUniform in TrainingInfoProto.initialization.node and assign its + // random output to the specific tensor using "initialization_binding". + // This graph can also set the initializers in "algorithm" in the same + // TrainingInfoProto; a use case is resetting the number of training + // iteration to zero. + // + // By default, this field is an empty graph and its evaluation does not + // produce any output. 
Thus, no initializer would be changed by default. + optional GraphProto initialization = 1; + + // This field represents a training algorithm step. Given required inputs, + // it computes outputs to update initializers in its own or inference graph's + // initializer lists. In general, this field contains loss node, gradient node, + // optimizer node, increment of iteration count. + // + // An execution of the training algorithm step is performed by executing the + // graph obtained by combining the inference graph (namely "ModelProto.graph") + // and the "algorithm" graph. That is, the actual + // input/initializer/output/node/value_info/sparse_initializer list of + // the training graph is the concatenation of + // "ModelProto.graph.input/initializer/output/node/value_info/sparse_initializer" + // and "algorithm.input/initializer/output/node/value_info/sparse_initializer" + // in that order. This combined graph must satisfy the normal ONNX conditions. + // Now, let's provide a visualization of graph combination for clarity. + // Let the inference graph (i.e., "ModelProto.graph") be + // tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d + // and the "algorithm" graph be + // tensor_d -> Add -> tensor_e + // The combination process results + // tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d -> Add -> tensor_e + // + // Notice that an input of a node in the "algorithm" graph may reference the + // output of a node in the inference graph (but not the other way round). Also, inference + // node cannot reference inputs of "algorithm". With these restrictions, inference graph + // can always be run independently without training information. + // + // By default, this field is an empty graph and its evaluation does not + // produce any output. Evaluating the default training step never + // update any initializers. 
+ optional GraphProto algorithm = 2; + + // This field specifies the bindings from the outputs of "initialization" to + // some initializers in "ModelProto.graph.initializer" and + // the "algorithm.initializer" in the same TrainingInfoProto. + // See "update_binding" below for details. + // + // By default, this field is empty and no initializer would be changed + // by the execution of "initialization". + repeated StringStringEntryProto initialization_binding = 3; + + // Gradient-based training is usually an iterative procedure. In one gradient + // descent iteration, we apply + // + // x = x - r * g + // + // where "x" is the optimized tensor, "r" stands for learning rate, and "g" is + // gradient of "x" with respect to a chosen loss. To avoid adding assignments + // into the training graph, we split the update equation into + // + // y = x - r * g + // x = y + // + // The user needs to save "y = x - r * g" into TrainingInfoProto.algorithm. To + // tell that "y" should be assigned to "x", the field "update_binding" may + // contain a key-value pair of strings, "x" (key of StringStringEntryProto) + // and "y" (value of StringStringEntryProto). + // For a neural network with multiple trainable (mutable) tensors, there can + // be multiple key-value pairs in "update_binding". + // + // The initializers appears as keys in "update_binding" are considered + // mutable variables. This implies some behaviors + // as described below. + // + // 1. We have only unique keys in all "update_binding"s so that two + // variables may not have the same name. This ensures that one + // variable is assigned up to once. + // 2. The keys must appear in names of "ModelProto.graph.initializer" or + // "TrainingInfoProto.algorithm.initializer". + // 3. The values must be output names of "algorithm" or "ModelProto.graph.output". + // 4. 
Mutable variables are initialized to the value specified by the + // corresponding initializer, and then potentially updated by + // "initializer_binding"s and "update_binding"s in "TrainingInfoProto"s. + // + // This field usually contains names of trainable tensors + // (in ModelProto.graph), optimizer states such as momentums in advanced + // stochastic gradient methods (in TrainingInfoProto.graph), + // and number of training iterations (in TrainingInfoProto.graph). + // + // By default, this field is empty and no initializer would be changed + // by the execution of "algorithm". + repeated StringStringEntryProto update_binding = 4; +} + +// Models +// +// ModelProto is a top-level file/container format for bundling a ML model and +// associating its computation graph with metadata. +// +// The semantics of the model are described by the associated GraphProto's. +message ModelProto { + // The version of the IR this model targets. See Version enum above. + // This field MUST be present. + optional int64 ir_version = 1; + + // The OperatorSets this model relies on. + // All ModelProtos MUST have at least one entry that + // specifies which version of the ONNX OperatorSet is + // being imported. + // + // All nodes in the ModelProto's graph will bind against the operator + // with the same-domain/same-op_type operator with the HIGHEST version + // in the referenced operator sets. + repeated OperatorSetIdProto opset_import = 8; + + // The name of the framework or tool used to generate this model. + // This field SHOULD be present to indicate which implementation/tool/framework + // emitted the model. + optional string producer_name = 2; + + // The version of the framework or tool used to generate this model. + // This field SHOULD be present to indicate which implementation/tool/framework + // emitted the model. + optional string producer_version = 3; + + // Domain name of the model. + // We use reverse domain names as name space indicators. 
For example: + // `com.facebook.fair` or `com.microsoft.cognitiveservices` + // + // Together with `model_version` and GraphProto.name, this forms the unique identity of + // the graph. + optional string domain = 4; + + // The version of the graph encoded. See Version enum below. + optional int64 model_version = 5; + + // A human-readable documentation for this model. Markdown is allowed. + optional string doc_string = 6; + + // The parameterized graph that is evaluated to execute the model. + optional GraphProto graph = 7; + + // Named metadata values; keys should be distinct. + repeated StringStringEntryProto metadata_props = 14; + + // Training-specific information. Sequentially executing all stored + // `TrainingInfoProto.algorithm`s and assigning their outputs following + // the corresponding `TrainingInfoProto.update_binding`s is one training + // iteration. Similarly, to initialize the model + // (as if training hasn't happened), the user should sequentially execute + // all stored `TrainingInfoProto.initialization`s and assigns their outputs + // using `TrainingInfoProto.initialization_binding`s. + // + // If this field is empty, the training behavior of the model is undefined. + repeated TrainingInfoProto training_info = 20; + + // A list of function protos local to the model. + // + // The (domain, name, overload) tuple must be unique across the function protos in this list. + // In case of any conflicts the behavior (whether the model local functions are given higher priority, + // or standard operator sets are given higher priority or this is treated as error) is defined by + // the runtimes. + // + // The operator sets imported by FunctionProto should be compatible with the ones + // imported by ModelProto and other model local FunctionProtos. 
+ // Example, if same operator set say 'A' is imported by a FunctionProto and ModelProto + // or by 2 FunctionProtos then versions for the operator set may be different but, + // the operator schema returned for op_type, domain, version combination + // for both the versions should be same for every node in the function body. + // + // One FunctionProto can reference other FunctionProto in the model, however, recursive reference + // is not allowed. + repeated FunctionProto functions = 25; + + // Describes different target configurations for a multi-device use case. + // A model MAY describe multiple multi-device configurations for execution. + repeated DeviceConfigurationProto configuration = 26; +}; + +// DeviceConfigurationProto describes a multi-device configuration for a model. +message DeviceConfigurationProto { + // This field MUST be present for this version of the IR. + // Name of the configuration. + optional string name = 1; + // This field MUST be present for this version of the IR. + // Number of devices inside this configuration. + optional int32 num_devices = 2; + // Optional names of the devices. MUST be length of num_devices if provided. + repeated string device = 3; +} + +// StringStringEntryProto follows the pattern for cross-proto-version maps. +// See https://developers.google.com/protocol-buffers/docs/proto3#maps +message StringStringEntryProto { + optional string key = 1; + optional string value = 2; +}; + +message TensorAnnotation { + optional string tensor_name = 1; + // pairs to annotate tensor specified by above. + // The keys used in the mapping below must be pre-defined in ONNX spec. + // For example, for 8-bit linear quantization case, 'SCALE_TENSOR', 'ZERO_POINT_TENSOR' will be pre-defined as + // quantization parameter keys. 
+ repeated StringStringEntryProto quant_parameter_tensor_names = 2; +} + + + +// Graphs +// +// A graph defines the computational logic of a model and is comprised of a parameterized +// list of nodes that form a directed acyclic graph based on their inputs and outputs. +// This is the equivalent of the "network" or "graph" in many deep learning +// frameworks. +message GraphProto { + // The nodes in the graph, sorted topologically. + repeated NodeProto node = 1; + + // The name of the graph. + optional string name = 2; // namespace Graph + + // A list of named tensor values, used to specify constant inputs of the graph. + // Each initializer (both TensorProto as well SparseTensorProto) MUST have a name. + // The name MUST be unique across both initializer and sparse_initializer, + // but the name MAY also appear in the input list. + repeated TensorProto initializer = 5; + + // Initializers (see above) stored in sparse format. + repeated SparseTensorProto sparse_initializer = 15; + + // A human-readable documentation for this graph. Markdown is allowed. + optional string doc_string = 10; + + // The inputs and outputs of the graph. + repeated ValueInfoProto input = 11; + repeated ValueInfoProto output = 12; + + // Information for the values in the graph. The ValueInfoProto.name's + // must be distinct. It is optional for a value to appear in value_info list. + repeated ValueInfoProto value_info = 13; + + // This field carries information to indicate the mapping among a tensor and its + // quantization parameter tensors. For example: + // For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated, + // which means, tensor 'a_scale' and tensor 'a_zero_point' are scale and zero point of tensor 'a' in the model. + repeated TensorAnnotation quantization_annotation = 14; + + // Named metadata values; keys should be distinct. 
+ repeated StringStringEntryProto metadata_props = 16; + + reserved 3, 4, 6 to 9; + reserved "ir_version", "producer_version", "producer_tag", "domain"; +} + +// Tensors +// +// A serialized tensor value. +message TensorProto { + enum DataType { + UNDEFINED = 0; + // Basic types. + FLOAT = 1; // float + UINT8 = 2; // uint8_t + INT8 = 3; // int8_t + UINT16 = 4; // uint16_t + INT16 = 5; // int16_t + INT32 = 6; // int32_t + INT64 = 7; // int64_t + STRING = 8; // string + BOOL = 9; // bool + + // IEEE754 half-precision floating-point format (16 bits wide). + // This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits. + FLOAT16 = 10; + + DOUBLE = 11; + UINT32 = 12; + UINT64 = 13; + COMPLEX64 = 14; // complex with float32 real and imaginary components + COMPLEX128 = 15; // complex with float64 real and imaginary components + + // Non-IEEE floating-point format based on IEEE754 single-precision + // floating-point number truncated to 16 bits. + // This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits. + BFLOAT16 = 16; + + // Non-IEEE floating-point format based on papers + // FP8 Formats for Deep Learning, https://arxiv.org/abs/2209.05433, + // 8-bit Numerical Formats For Deep Neural Networks, https://arxiv.org/pdf/2206.02915.pdf. + // Operators supported FP8 are Cast, CastLike, QuantizeLinear, DequantizeLinear. + // The computation usually happens inside a block quantize / dequantize + // fused by the runtime. 
+ FLOAT8E4M3FN = 17; // float 8, mostly used for coefficients, supports nan, not inf + FLOAT8E4M3FNUZ = 18; // float 8, mostly used for coefficients, supports nan, not inf, no negative zero + FLOAT8E5M2 = 19; // follows IEEE 754, supports nan, inf, mostly used for gradients + FLOAT8E5M2FNUZ = 20; // follows IEEE 754, supports nan, not inf, mostly used for gradients, no negative zero + + // 4-bit integer data types + UINT4 = 21; // Unsigned integer in range [0, 15] + INT4 = 22; // Signed integer in range [-8, 7], using two's-complement representation + + // 4-bit floating point data types + FLOAT4E2M1 = 23; + + // E8M0 type used as the scale for microscaling (MX) formats: + // https://www.opencompute.org/documents/ocp-microscaling-formats-mx-v1-0-spec-final-pdf + FLOAT8E8M0 = 24; + + // Future extensions go here. + } + + // The shape of the tensor. + repeated int64 dims = 1; + + // The data type of the tensor. + // This field MUST have a valid TensorProto.DataType value + optional int32 data_type = 2; + + // For very large tensors, we may want to store them in chunks, in which + // case the following fields will specify the segment that is stored in + // the current TensorProto. + message Segment { + optional int64 begin = 1; + optional int64 end = 2; + } + optional Segment segment = 3; + + // Tensor content must be organized in row-major order. + // + // Depending on the data_type field, exactly one of the fields below with + // name ending in _data is used to store the elements of the tensor. + + // For float and complex64 values + // Complex64 tensors are encoded as a single array of floats, + // with the real components appearing in odd numbered positions, + // and the corresponding imaginary component appearing in the + // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i] + // is encoded as [1.0, 2.0 ,3.0 ,4.0] + // When this field is present, the data_type field MUST be FLOAT or COMPLEX64. 
+ repeated float float_data = 4 [packed = true]; + + // For int32, uint8, int8, uint16, int16, uint4, int4, bool, (b)float16, float8, and float4: + // - (b)float16 and float8 values MUST be converted bit-wise into an unsigned integer + // representation before being written to the buffer. + // - Each pair of uint4, int4, and float4 values MUST be packed as two 4-bit elements into a single byte. + // The first element is stored in the 4 least significant bits (LSB), + // and the second element is stored in the 4 most significant bits (MSB). + // + // Consequently: + // - For data types with a bit-width of 8 or greater, each `int32_data` stores one element. + // - For 4-bit data types, each `int32_data` stores two elements. + // + // When this field is present, the data_type field MUST be + // INT32, INT16, INT8, INT4, UINT16, UINT8, UINT4, BOOL, FLOAT16, BFLOAT16, FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ, FLOAT8E8M0, FLOAT4E2M1 + repeated int32 int32_data = 5 [packed = true]; + + // For strings. + // Each element of string_data is a UTF-8 encoded Unicode + // string. No trailing null, no leading BOM. The protobuf "string" + // scalar type is not used to match ML community conventions. + // When this field is present, the data_type field MUST be STRING + repeated bytes string_data = 6; + + // For int64. + // When this field is present, the data_type field MUST be INT64 + repeated int64 int64_data = 7 [packed = true]; + + // Optionally, a name for the tensor. + optional string name = 8; // namespace Value + + // A human-readable documentation for this tensor. Markdown is allowed. + optional string doc_string = 12; + + // Serializations can either use one of the fields above, or use this + // raw bytes field. The only exception is the string case, where one is + // required to store the content in the repeated bytes string_data field. 
+ // + // When this raw_data field is used to store tensor value, elements MUST + // be stored in as fixed-width, little-endian order. + // Floating-point data types MUST be stored in IEEE 754 format. + // Complex64 elements must be written as two consecutive FLOAT values, real component first. + // Complex128 elements must be written as two consecutive DOUBLE values, real component first. + // Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false). + // uint4 and int4 values must be packed to 4bitx2, the first element is stored in the 4 LSB and the second element is stored in the 4 MSB. + // + // Note: the advantage of specific field rather than the raw_data field is + // that in some cases (e.g. int data), protobuf does a better packing via + // variable length storage, and may lead to smaller binary footprint. + // When this field is present, the data_type field MUST NOT be STRING or UNDEFINED + optional bytes raw_data = 9; + + // Data can be stored inside the protobuf file using type-specific fields or raw_data. + // Alternatively, raw bytes data can be stored in an external file, using the external_data field. + // external_data stores key-value pairs describing data location. Recognized keys are: + // - "location" (required) - POSIX filesystem path relative to the directory where the ONNX + // protobuf model was stored + // - "offset" (optional) - position of byte at which stored data begins. Integer stored as string. + // Offset values SHOULD be multiples 4096 (page size) to enable mmap support. + // - "length" (optional) - number of bytes containing data. Integer stored as string. + // - "checksum" (optional) - SHA1 digest of file specified in under 'location' key. + repeated StringStringEntryProto external_data = 13; + + // Location of the data for this tensor. MUST be one of: + // - DEFAULT - data stored inside the protobuf message. Data is stored in raw_data (if set) otherwise in type-specified field. 
+ // - EXTERNAL - data stored in an external location as described by external_data field. + enum DataLocation { + DEFAULT = 0; + EXTERNAL = 1; + } + + // If value not set, data is stored in raw_data (if set) otherwise in type-specified field. + optional DataLocation data_location = 14; + + // For double + // Complex128 tensors are encoded as a single array of doubles, + // with the real components appearing in odd numbered positions, + // and the corresponding imaginary component appearing in the + // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i] + // is encoded as [1.0, 2.0 ,3.0 ,4.0] + // When this field is present, the data_type field MUST be DOUBLE or COMPLEX128 + repeated double double_data = 10 [packed = true]; + + // For uint64 and uint32 values + // When this field is present, the data_type field MUST be + // UINT32 or UINT64 + repeated uint64 uint64_data = 11 [packed = true]; + + // Named metadata values; keys should be distinct. + repeated StringStringEntryProto metadata_props = 16; +} + +// A serialized sparse-tensor value +message SparseTensorProto { + // The sequence of non-default values are encoded as a tensor of shape [NNZ]. + // The default-value is zero for numeric tensors, and empty-string for string tensors. + // values must have a non-empty name present which serves as a name for SparseTensorProto + // when used in sparse_initializer list. + optional TensorProto values = 1; + + // The indices of the non-default values, which may be stored in one of two formats. + // (a) Indices can be a tensor of shape [NNZ, rank] with the [i,j]-th value + // corresponding to the j-th index of the i-th value (in the values tensor). + // (b) Indices can be a tensor of shape [NNZ], in which case the i-th value + // must be the linearized-index of the i-th value (in the values tensor). + // The linearized-index can be converted into an index tuple (k_1,...,k_rank) + // using the shape provided below. 
+ // The indices must appear in ascending order without duplication. + // In the first format, the ordering is lexicographic-ordering: + // e.g., index-value [1,4] must appear before [2,1] + optional TensorProto indices = 2; + + // The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank] + repeated int64 dims = 3; +} + +// Defines a tensor shape. A dimension can be either an integer value +// or a symbolic variable. A symbolic variable represents an unknown +// dimension. +message TensorShapeProto { + message Dimension { + oneof value { + int64 dim_value = 1; + string dim_param = 2; // namespace Shape + }; + // Standard denotation can optionally be used to denote tensor + // dimensions with standard semantic descriptions to ensure + // that operations are applied to the correct axis of a tensor. + // Refer to https://github.com/onnx/onnx/blob/main/docs/DimensionDenotation.md#denotation-definition + // for pre-defined dimension denotations. + optional string denotation = 3; + }; + repeated Dimension dim = 1; +} + +// Types +// +// The standard ONNX data types. +message TypeProto { + + message Tensor { + // This field MUST NOT have the value of UNDEFINED + // This field MUST have a valid TensorProto.DataType value + // This field MUST be present for this version of the IR. + optional int32 elem_type = 1; + optional TensorShapeProto shape = 2; + } + + // repeated T + message Sequence { + // The type and optional shape of each element of the sequence. + // This field MUST be present for this version of the IR. + optional TypeProto elem_type = 1; + }; + + // map + message Map { + // This field MUST have a valid TensorProto.DataType value + // This field MUST be present for this version of the IR. + // This field MUST refer to an integral type ([U]INT{8|16|32|64}) or STRING + optional int32 key_type = 1; + // This field MUST be present for this version of the IR. 
+ optional TypeProto value_type = 2; + }; + + // wrapper for Tensor, Sequence, or Map + message Optional { + // The type and optional shape of the element wrapped. + // This field MUST be present for this version of the IR. + // Possible values correspond to OptionalProto.DataType enum + optional TypeProto elem_type = 1; + }; + + + message SparseTensor { + // This field MUST NOT have the value of UNDEFINED + // This field MUST have a valid TensorProto.DataType value + // This field MUST be present for this version of the IR. + optional int32 elem_type = 1; + optional TensorShapeProto shape = 2; + } + + + message Opaque { + // When missing, the domain is the same as the model's. + optional string domain = 1; + // The name is optional but significant when provided. + optional string name = 2; + // parameters that help defining the type + // DEPRECATED do not use. + // repeated TypeProto parameters = 3; + } + + + oneof value { + // The type of a tensor. + Tensor tensor_type = 1; + + // NOTE: DNN-only implementations of ONNX MAY elect to not support non-tensor values + // as input and output to graphs and nodes. These types are needed to naturally + // support classical ML operators. DNN operators SHOULD restrict their input + // and output types to tensors. + + // The type of a sequence. + Sequence sequence_type = 4; + + // The type of a map. + Map map_type = 5; + + // The type of an optional. + Optional optional_type = 9; + + + // Type of the sparse tensor + SparseTensor sparse_tensor_type = 8; + + + Opaque opaque_type = 7; + + } + + // An optional denotation can be used to denote the whole + // type with a standard semantic description as to what is + // stored inside. Refer to https://github.com/onnx/onnx/blob/main/docs/TypeDenotation.md#type-denotation-definition + // for pre-defined type denotations. + optional string denotation = 6; +} + +// Operator Sets +// +// OperatorSets are uniquely identified by a (domain, opset_version) pair. 
+message OperatorSetIdProto { + // The domain of the operator set being identified. + // The empty string ("") or absence of this field implies the operator + // set that is defined as part of the ONNX specification. + // This field MUST be present in this version of the IR when referring to any other operator set. + optional string domain = 1; + + // The version of the operator set being identified. + // This field MUST be present in this version of the IR. + optional int64 version = 2; +} + +// Operator/function status. +enum OperatorStatus { + EXPERIMENTAL = 0; + STABLE = 1; +} + +message FunctionProto { + // The name of the function, similar to op_type in NodeProto. + // This is part of the unique-id (domain, name, overload) of FunctionProtos in a model. + optional string name = 1; + + // Deprecated since IR Version 8 + // optional int64 since_version = 2; + reserved 2; + reserved "since_version"; + + // Deprecated since IR Version 8 + // optional OperatorStatus status = 3; + reserved 3; + reserved "status"; + + // The inputs and outputs of the function. + repeated string input = 4; + repeated string output = 5; + + // The attribute parameters of the function. + // It is for function parameters without default values. + repeated string attribute = 6; + + // The attribute protos of the function. + // It is for function attributes with default values. + // A function attribute shall be represented either as + // a string attribute or an AttributeProto, not both. + repeated AttributeProto attribute_proto = 11; + + // The nodes in the function. + repeated NodeProto node = 7; + // A human-readable documentation for this function. Markdown is allowed. + optional string doc_string = 8; + + // The OperatorSets this function body (graph) relies on. + // + // All nodes in the function body (graph) will bind against the operator + // with the same-domain/same-op_type operator with the HIGHEST version + // in the referenced operator sets. 
This means at most one version can be relied + // for one domain. + // + // The operator sets imported by FunctionProto should be compatible with the ones + // imported by ModelProto. Example, if same operator set say 'A' is imported by FunctionProto + // and ModelProto then versions for the operator set may be different but, + // the operator schema returned for op_type, domain, version combination + // for both the versions should be same. + + repeated OperatorSetIdProto opset_import = 9; + + // The domain which this function belongs to. + // This is part of the unique-id (domain, name, overload) of FunctionProtos in a model. + optional string domain = 10; + + // The overload identifier of the function. + // This is part of the unique-id (domain, name, overload) of FunctionProtos in a model. + optional string overload = 13; + + // Information for the values in the function. The ValueInfoProto.name's + // must be distinct and refer to names in the function (including inputs, + // outputs, and intermediate values). It is optional for a value to appear + // in value_info list. + repeated ValueInfoProto value_info = 12; + + // Named metadata values; keys should be distinct. + repeated StringStringEntryProto metadata_props = 14; +} + +// For using protobuf-lite +option optimize_for = LITE_RUNTIME; + diff --git a/pythonProject/.venv/Lib/site-packages/onnx/onnx-operators-ml.proto b/pythonProject/.venv/Lib/site-packages/onnx/onnx-operators-ml.proto new file mode 100644 index 0000000000000000000000000000000000000000..a9a07f12aa4f0ebd0999926449cae7886c6e231b --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/onnx-operators-ml.proto @@ -0,0 +1,136 @@ +// +// WARNING: This file is automatically generated! Please edit onnx.in.proto. +// + + +// Copyright (c) ONNX Project Contributors. +// Licensed under the Apache-2.0 license. 
+ +syntax = "proto2"; + +package onnx; +import "onnx/onnx-ml.proto"; + +// +// This file contains the proto definitions for OperatorSetProto and +// OperatorProto. OperatorSetProtos are used to describe a versioned +// set of operators that can be used by a ModelProto. +// +// Like ModelProto, OperatorSetProto is defined as a top-level file/wire +// format, however their usage is different. +// +// ModelProto files are used to describe executable graphs that can be +// executed directly by a framework, runtime, or engine. +// +// OperatorSetProto files are used to describe a set of operators that are +// available in a given environment. The file TBD.TBD is the OperatorSetProto +// that describes the ONNX standard operators. +// + +// An OperatorProto represents the immutable specification of the signature +// and semantics of an operator. +// +// Operators are declared as part of an OperatorSet, which also defines the +// domain name for the set. +// +// Operators are uniquely identified by a three part identifier +// (domain, op_type, since_version) +// where +// *domain* is the domain of an operator set that +// contains this operator specification. +// +// *op_type* is the name of the operator as referenced by a +// NodeProto.op_type +// +// *since_version* is the version of the operator set that +// this operator was initially declared in. +// +message OperatorProto { + // The name of the operator within a domain. + // This field MUST be present in this version of the IR. + optional string op_type = 1; + + // The version of the operator set that first introduced this + // operator. This value MUST be the same value as the + // opset_version of the operator set that first published this operator. + // Subsequent versions of the operator set MUST NOT alter the signature + // or semantics of the operator once published as STABLE. + // This field MUST be present in this version of the IR. 
+ optional int64 since_version = 2; + + // This field indicates whether the syntax, semantics, or presence + // of this operator is in an experimental or stable stage. Once an + // operator is published as STABLE, it's syntax and semantics MUST NOT + // change in subsequent versions of the operator set. + // When an operator is published as EXPERIMENTAL, the syntax and semantics + // of the operator MAY change across operator set versions. + // Operators "become" stable by deprecating the experimental version and + // introducing a new stable operator with the same op_type. + optional OperatorStatus status = 3; + + // Eventually we will declare the signature of the operator here + + // A human-readable documentation for this operator. Markdown is allowed. + optional string doc_string = 10; +} + +// An OperatorSetProto represents an immutable set of immutable operator +// specifications. +// +// The domain of the set (OperatorSetProto.domain) is a reverse-DNS name +// that disambiguates operator sets defined by independent entities. +// +// The version of the set (opset_version) is a monotonically increasing +// integer that indicates changes to the membership of the operator set. +// +// +// Operator sets are uniquely identified by a two part identifier (domain, opset_version) +// +// Like ModelProto, OperatorSetProto is intended as a top-level file/wire format, +// and thus has the standard format headers in addition to the operator set information. +// +message OperatorSetProto { + // All OperatorSetProtos start with a distinguished byte sequence to disambiguate + // protobuf files containing OperatorSets from other content. + // This field MUST be "ONNXOPSET" + // This field MUST be present in this version of the IR + optional string magic = 1; + + // All OperatorSetProtos indicate the version of the IR syntax and semantics + // they adhere to. It is always IR_VERSION. 
+ // This field MUST be present in this version of the IR + optional int64 ir_version = 2; + + // The prerelease component of the SemVer of the IR. + // This field MAY be absent in this version of the IR + optional string ir_version_prerelease = 3; + + // The build metadata component of the SemVer of the IR. + // This field MAY be absent in this version of the IR + optional string ir_build_metadata = 7; + + // Domain name of the operator set, in reverse DNS form (e.g., com.acme.dnnops). + optional string domain = 4; + + // The version of the set of operators. This is a simple int value + // that is monotonically increasing as new versions of the operator set + // are published. All operators in this set MUST have since_version + // <= opset_version. + optional int64 opset_version = 5; + + // A human-readable documentation for this set of operators. Markdown is allowed. + optional string doc_string = 6; + + // The operators specified by this operator set. + // The (name, version) MUST be unique across all OperatorProtos in operator + repeated OperatorProto operator = 8; + + // The functions specified by this operator set. + // The (name, version) MUST be unique across all OperatorProtos/FunctionProtos in operator/functions + repeated FunctionProto functions = 9; +} + + +// For using protobuf-lite +option optimize_for = LITE_RUNTIME; + diff --git a/pythonProject/.venv/Lib/site-packages/onnx/onnx-operators.in.proto b/pythonProject/.venv/Lib/site-packages/onnx/onnx-operators.in.proto new file mode 100644 index 0000000000000000000000000000000000000000..cc84dd2f900be8aeeb8b235add083e3e9b44d98f --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/onnx-operators.in.proto @@ -0,0 +1,131 @@ +// Copyright (c) ONNX Project Contributors. +// Licensed under the Apache-2.0 license. 
+ +syntax = "proto2"; + +package {PACKAGE_NAME}; +// #if ONNX-ML +import "onnx/onnx-ml.proto"; +// #else +import "onnx/onnx.proto"; +// #endif + +// +// This file contains the proto definitions for OperatorSetProto and +// OperatorProto. OperatorSetProtos are used to describe a versioned +// set of operators that can be used by a ModelProto. +// +// Like ModelProto, OperatorSetProto is defined as a top-level file/wire +// format, however their usage is different. +// +// ModelProto files are used to describe executable graphs that can be +// executed directly by a framework, runtime, or engine. +// +// OperatorSetProto files are used to describe a set of operators that are +// available in a given environment. The file TBD.TBD is the OperatorSetProto +// that describes the ONNX standard operators. +// + +// An OperatorProto represents the immutable specification of the signature +// and semantics of an operator. +// +// Operators are declared as part of an OperatorSet, which also defines the +// domain name for the set. +// +// Operators are uniquely identified by a three part identifier +// (domain, op_type, since_version) +// where +// *domain* is the domain of an operator set that +// contains this operator specification. +// +// *op_type* is the name of the operator as referenced by a +// NodeProto.op_type +// +// *since_version* is the version of the operator set that +// this operator was initially declared in. +// +message OperatorProto { + // The name of the operator within a domain. + // This field MUST be present in this version of the IR. + optional string op_type = 1; + + // The version of the operator set that first introduced this + // operator. This value MUST be the same value as the + // opset_version of the operator set that first published this operator. + // Subsequent versions of the operator set MUST NOT alter the signature + // or semantics of the operator once published as STABLE. + // This field MUST be present in this version of the IR. 
+ optional int64 since_version = 2; + + // This field indicates whether the syntax, semantics, or presence + // of this operator is in an experimental or stable stage. Once an + // operator is published as STABLE, it's syntax and semantics MUST NOT + // change in subsequent versions of the operator set. + // When an operator is published as EXPERIMENTAL, the syntax and semantics + // of the operator MAY change across operator set versions. + // Operators "become" stable by deprecating the experimental version and + // introducing a new stable operator with the same op_type. + optional OperatorStatus status = 3; + + // Eventually we will declare the signature of the operator here + + // A human-readable documentation for this operator. Markdown is allowed. + optional string doc_string = 10; +} + +// An OperatorSetProto represents an immutable set of immutable operator +// specifications. +// +// The domain of the set (OperatorSetProto.domain) is a reverse-DNS name +// that disambiguates operator sets defined by independent entities. +// +// The version of the set (opset_version) is a monotonically increasing +// integer that indicates changes to the membership of the operator set. +// +// +// Operator sets are uniquely identified by a two part identifier (domain, opset_version) +// +// Like ModelProto, OperatorSetProto is intended as a top-level file/wire format, +// and thus has the standard format headers in addition to the operator set information. +// +message OperatorSetProto { + // All OperatorSetProtos start with a distinguished byte sequence to disambiguate + // protobuf files containing OperatorSets from other content. + // This field MUST be "ONNXOPSET" + // This field MUST be present in this version of the IR + optional string magic = 1; + + // All OperatorSetProtos indicate the version of the IR syntax and semantics + // they adhere to. It is always IR_VERSION. 
+ // This field MUST be present in this version of the IR + optional int64 ir_version = 2; + + // The prerelease component of the SemVer of the IR. + // This field MAY be absent in this version of the IR + optional string ir_version_prerelease = 3; + + // The build metadata component of the SemVer of the IR. + // This field MAY be absent in this version of the IR + optional string ir_build_metadata = 7; + + // Domain name of the operator set, in reverse DNS form (e.g., com.acme.dnnops). + optional string domain = 4; + + // The version of the set of operators. This is a simple int value + // that is monotonically increasing as new versions of the operator set + // are published. All operators in this set MUST have since_version + // <= opset_version. + optional int64 opset_version = 5; + + // A human-readable documentation for this set of operators. Markdown is allowed. + optional string doc_string = 6; + + // The operators specified by this operator set. + // The (name, version) MUST be unique across all OperatorProtos in operator + repeated OperatorProto operator = 8; + + // The functions specified by this operator set. + // The (name, version) MUST be unique across all OperatorProtos/FunctionProtos in operator/functions + repeated FunctionProto functions = 9; +} + diff --git a/pythonProject/.venv/Lib/site-packages/onnx/onnx-operators.proto b/pythonProject/.venv/Lib/site-packages/onnx/onnx-operators.proto new file mode 100644 index 0000000000000000000000000000000000000000..2f772c64ce340bd91b825acde3fbaa4a8af2f0f3 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/onnx-operators.proto @@ -0,0 +1,136 @@ +// +// WARNING: This file is automatically generated! Please edit onnx.in.proto. +// + + +// Copyright (c) ONNX Project Contributors. +// Licensed under the Apache-2.0 license. + +syntax = "proto2"; + +package onnx; +import "onnx/onnx.proto"; + +// +// This file contains the proto definitions for OperatorSetProto and +// OperatorProto. 
OperatorSetProtos are used to describe a versioned +// set of operators that can be used by a ModelProto. +// +// Like ModelProto, OperatorSetProto is defined as a top-level file/wire +// format, however their usage is different. +// +// ModelProto files are used to describe executable graphs that can be +// executed directly by a framework, runtime, or engine. +// +// OperatorSetProto files are used to describe a set of operators that are +// available in a given environment. The file TBD.TBD is the OperatorSetProto +// that describes the ONNX standard operators. +// + +// An OperatorProto represents the immutable specification of the signature +// and semantics of an operator. +// +// Operators are declared as part of an OperatorSet, which also defines the +// domain name for the set. +// +// Operators are uniquely identified by a three part identifier +// (domain, op_type, since_version) +// where +// *domain* is the domain of an operator set that +// contains this operator specification. +// +// *op_type* is the name of the operator as referenced by a +// NodeProto.op_type +// +// *since_version* is the version of the operator set that +// this operator was initially declared in. +// +message OperatorProto { + // The name of the operator within a domain. + // This field MUST be present in this version of the IR. + optional string op_type = 1; + + // The version of the operator set that first introduced this + // operator. This value MUST be the same value as the + // opset_version of the operator set that first published this operator. + // Subsequent versions of the operator set MUST NOT alter the signature + // or semantics of the operator once published as STABLE. + // This field MUST be present in this version of the IR. + optional int64 since_version = 2; + + // This field indicates whether the syntax, semantics, or presence + // of this operator is in an experimental or stable stage. 
Once an + // operator is published as STABLE, it's syntax and semantics MUST NOT + // change in subsequent versions of the operator set. + // When an operator is published as EXPERIMENTAL, the syntax and semantics + // of the operator MAY change across operator set versions. + // Operators "become" stable by deprecating the experimental version and + // introducing a new stable operator with the same op_type. + optional OperatorStatus status = 3; + + // Eventually we will declare the signature of the operator here + + // A human-readable documentation for this operator. Markdown is allowed. + optional string doc_string = 10; +} + +// An OperatorSetProto represents an immutable set of immutable operator +// specifications. +// +// The domain of the set (OperatorSetProto.domain) is a reverse-DNS name +// that disambiguates operator sets defined by independent entities. +// +// The version of the set (opset_version) is a monotonically increasing +// integer that indicates changes to the membership of the operator set. +// +// +// Operator sets are uniquely identified by a two part identifier (domain, opset_version) +// +// Like ModelProto, OperatorSetProto is intended as a top-level file/wire format, +// and thus has the standard format headers in addition to the operator set information. +// +message OperatorSetProto { + // All OperatorSetProtos start with a distinguished byte sequence to disambiguate + // protobuf files containing OperatorSets from other content. + // This field MUST be "ONNXOPSET" + // This field MUST be present in this version of the IR + optional string magic = 1; + + // All OperatorSetProtos indicate the version of the IR syntax and semantics + // they adhere to. It is always IR_VERSION. + // This field MUST be present in this version of the IR + optional int64 ir_version = 2; + + // The prerelease component of the SemVer of the IR. 
+ // This field MAY be absent in this version of the IR + optional string ir_version_prerelease = 3; + + // The build metadata component of the SemVer of the IR. + // This field MAY be absent in this version of the IR + optional string ir_build_metadata = 7; + + // Domain name of the operator set, in reverse DNS form (e.g., com.acme.dnnops). + optional string domain = 4; + + // The version of the set of operators. This is a simple int value + // that is monotonically increasing as new versions of the operator set + // are published. All operators in this set MUST have since_version + // <= opset_version. + optional int64 opset_version = 5; + + // A human-readable documentation for this set of operators. Markdown is allowed. + optional string doc_string = 6; + + // The operators specified by this operator set. + // The (name, version) MUST be unique across all OperatorProtos in operator + repeated OperatorProto operator = 8; + + // The functions specified by this operator set. + // The (name, version) MUST be unique across all OperatorProtos/FunctionProtos in operator/functions + repeated FunctionProto functions = 9; +} + + +// For using protobuf-lite +option optimize_for = LITE_RUNTIME; + diff --git a/pythonProject/.venv/Lib/site-packages/onnx/onnx-operators_pb.h b/pythonProject/.venv/Lib/site-packages/onnx/onnx-operators_pb.h new file mode 100644 index 0000000000000000000000000000000000000000..05cf569283fb5d60ec66d0fa00d7950658f71985 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/onnx-operators_pb.h @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include "onnx/onnx_pb.h" +#ifdef ONNX_ML +#include "onnx/onnx-operators-ml.pb.h" +#else +#include "onnx/onnx-operators.pb.h" +#endif diff --git a/pythonProject/.venv/Lib/site-packages/onnx/py.typed b/pythonProject/.venv/Lib/site-packages/onnx/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff 
--git a/pythonProject/.venv/Lib/site-packages/onnx/py_utils.h b/pythonProject/.venv/Lib/site-packages/onnx/py_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..250bdd6fce18eddc031b0f0dbcf130ed35d27513 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/py_utils.h @@ -0,0 +1,23 @@ +// Copyright (c) ONNX Project Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include + +#include "onnx/proto_utils.h" + +namespace ONNX_NAMESPACE { +namespace py = pybind11; + +template +bool ParseProtoFromPyBytes(Proto* proto, const py::bytes& bytes) { + // Get the buffer from Python bytes object + char* buffer = nullptr; + Py_ssize_t length = 0; + PyBytes_AsStringAndSize(bytes.ptr(), &buffer, &length); + + return ParseProtoFromBytes(proto, buffer, length); +} +} // namespace ONNX_NAMESPACE diff --git a/pythonProject/.venv/Lib/site-packages/onnx/serialization.py b/pythonProject/.venv/Lib/site-packages/onnx/serialization.py new file mode 100644 index 0000000000000000000000000000000000000000..384ea259430d31671f857ba853d594d497483061 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/serialization.py @@ -0,0 +1,212 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import annotations + +import warnings + +__all__ = [ + "registry", +] + +import typing +from typing import Any, Optional, Protocol, TypeVar + +import google.protobuf.json_format +import google.protobuf.message +import google.protobuf.text_format + +import onnx + +if typing.TYPE_CHECKING: + from collections.abc import Collection + +_Proto = TypeVar("_Proto", bound=google.protobuf.message.Message) +# Encoding used for serializing and deserializing text files +_ENCODING = "utf-8" + + +class ProtoSerializer(Protocol): + """A serializer-deserializer to and from in-memory Protocol Buffers representations.""" + + # Format supported by the serializer. E.g. 
"protobuf" + supported_format: str + # File extensions supported by the serializer. E.g. frozenset({".onnx", ".pb"}) + # Be careful to include the dot in the file extension. + file_extensions: Collection[str] + + # NOTE: The methods defined are serialize_proto and deserialize_proto and not the + # more generic serialize and deserialize to leave space for future protocols + # that are defined to serialize/deserialize the ONNX in memory IR. + # This way a class can implement both protocols. + + def serialize_proto(self, proto: _Proto) -> Any: + """Serialize a in-memory proto to a serialized data type.""" + + def deserialize_proto(self, serialized: Any, proto: _Proto) -> _Proto: + """Parse a serialized data type into a in-memory proto.""" + + +class _Registry: + def __init__(self) -> None: + self._serializers: dict[str, ProtoSerializer] = {} + # A mapping from file extension to format + self._extension_to_format: dict[str, str] = {} + + def register(self, serializer: ProtoSerializer) -> None: + self._serializers[serializer.supported_format] = serializer + self._extension_to_format.update( + dict.fromkeys(serializer.file_extensions, serializer.supported_format) + ) + + def get(self, fmt: str) -> ProtoSerializer: + """Get a serializer for a format. + + Args: + fmt: The format to get a serializer for. + + Returns: + ProtoSerializer: The serializer for the format. + + Raises: + ValueError: If the format is not supported. + """ + try: + return self._serializers[fmt] + except KeyError: + raise ValueError( + f"Unsupported format: '{fmt}'. Supported formats are: {self._serializers.keys()}" + ) from None + + def get_format_from_file_extension(self, file_extension: str) -> str | None: + """Get the corresponding format from a file extension. + + Args: + file_extension: The file extension to get a format for. + + Returns: + The format for the file extension, or None if not found. 
+ """ + return self._extension_to_format.get(file_extension) + + +class _ProtobufSerializer(ProtoSerializer): + """Serialize and deserialize protobuf message.""" + + supported_format = "protobuf" + file_extensions = frozenset({".onnx", ".pb"}) + + def serialize_proto(self, proto: _Proto) -> bytes: + if hasattr(proto, "SerializeToString") and callable(proto.SerializeToString): + try: + result = proto.SerializeToString() + except ValueError as e: + if proto.ByteSize() >= onnx.checker.MAXIMUM_PROTOBUF: + raise ValueError( + "The proto size is larger than the 2 GB limit. " + "Please use save_as_external_data to save tensors separately from the model file." + ) from e + raise + return result # type: ignore + raise TypeError( + f"No SerializeToString method is detected.\ntype is {type(proto)}" + ) + + def deserialize_proto(self, serialized: bytes, proto: _Proto) -> _Proto: + if not isinstance(serialized, bytes): + raise TypeError( + f"Parameter 'serialized' must be bytes, but got type: {type(serialized)}" + ) + decoded = typing.cast("Optional[int]", proto.ParseFromString(serialized)) + if decoded is not None and decoded != len(serialized): + raise google.protobuf.message.DecodeError( + f"Protobuf decoding consumed too few bytes: {decoded} out of {len(serialized)}" + ) + return proto + + +class _TextProtoSerializer(ProtoSerializer): + """Serialize and deserialize text proto.""" + + supported_format = "textproto" + file_extensions = frozenset({".txtpb", ".textproto", ".prototxt", ".pbtxt"}) + + def serialize_proto(self, proto: _Proto) -> bytes: + textproto = google.protobuf.text_format.MessageToString(proto) + return textproto.encode(_ENCODING) + + def deserialize_proto(self, serialized: bytes | str, proto: _Proto) -> _Proto: + if not isinstance(serialized, (bytes, str)): + raise TypeError( + f"Parameter 'serialized' must be bytes or str, but got type: {type(serialized)}" + ) + if isinstance(serialized, bytes): + serialized = serialized.decode(_ENCODING) + assert 
isinstance(serialized, str) + return google.protobuf.text_format.Parse(serialized, proto) + + +class _JsonSerializer(ProtoSerializer): + """Serialize and deserialize JSON.""" + + supported_format = "json" + file_extensions = frozenset({".json", ".onnxjson"}) + + def serialize_proto(self, proto: _Proto) -> bytes: + json_message = google.protobuf.json_format.MessageToJson( + proto, preserving_proto_field_name=True + ) + return json_message.encode(_ENCODING) + + def deserialize_proto(self, serialized: bytes | str, proto: _Proto) -> _Proto: + if not isinstance(serialized, (bytes, str)): + raise TypeError( + f"Parameter 'serialized' must be bytes or str, but got type: {type(serialized)}" + ) + if isinstance(serialized, bytes): + serialized = serialized.decode(_ENCODING) + assert isinstance(serialized, str) + return google.protobuf.json_format.Parse(serialized, proto) + + +class _TextualSerializer(ProtoSerializer): + """Serialize and deserialize the ONNX textual representation.""" + + supported_format = "onnxtxt" + file_extensions = frozenset({".onnxtxt", ".onnxtext"}) + + def serialize_proto(self, proto: _Proto) -> bytes: + text = onnx.printer.to_text(proto) # type: ignore[arg-type] + return text.encode(_ENCODING) + + def deserialize_proto(self, serialized: bytes | str, proto: _Proto) -> _Proto: + warnings.warn( + "The onnxtxt format is experimental. 
Please report any errors to the ONNX GitHub repository.", + stacklevel=2, + ) + if not isinstance(serialized, (bytes, str)): + raise TypeError( + f"Parameter 'serialized' must be bytes or str, but got type: {type(serialized)}" + ) + if isinstance(serialized, bytes): + text = serialized.decode(_ENCODING) + else: + text = serialized + if isinstance(proto, onnx.ModelProto): + return onnx.parser.parse_model(text) # type: ignore[return-value] + if isinstance(proto, onnx.GraphProto): + return onnx.parser.parse_graph(text) # type: ignore[return-value] + if isinstance(proto, onnx.FunctionProto): + return onnx.parser.parse_function(text) # type: ignore[return-value] + if isinstance(proto, onnx.NodeProto): + return onnx.parser.parse_node(text) # type: ignore[return-value] + raise ValueError(f"Unsupported proto type: {type(proto)}") + + +# Register default serializers +registry = _Registry() +registry.register(_ProtobufSerializer()) +registry.register(_TextProtoSerializer()) +registry.register(_JsonSerializer()) +registry.register(_TextualSerializer()) diff --git a/pythonProject/.venv/Lib/site-packages/onnx/shape_inference.py b/pythonProject/.venv/Lib/site-packages/onnx/shape_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..4a4a696eaf814e6adc4844a7d3583b390fbc9f1e --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/shape_inference.py @@ -0,0 +1,177 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 + +"""onnx shape inference. Shape inference is not guaranteed to be +complete. 
+ +""" + +from __future__ import annotations + +import os +from typing import TYPE_CHECKING + +import onnx +import onnx.onnx_cpp2py_export.shape_inference as C # noqa: N812 +from onnx.onnx_pb import AttributeProto, FunctionProto, ModelProto, TypeProto + +if TYPE_CHECKING: + from collections.abc import Sequence + +GraphInferencer = C.GraphInferencer +InferenceContext = C.InferenceContext + + +def infer_shapes( + model: ModelProto | bytes, + check_type: bool = False, + strict_mode: bool = False, + data_prop: bool = False, +) -> ModelProto: + """Apply shape inference to the provided ModelProto. + + Inferred shapes are added to the value_info field of the graph. + + If the inferred values conflict with values already provided in the + graph, that means that the provided values are invalid (or there is a + bug in shape inference), and the result is unspecified. + + Arguments: + model: ModelProto. + check_type: Checks the type-equality for input and output. + strict_mode: Stricter shape inference, it will throw errors if any; + Otherwise, simply stop if any error. + data_prop: Enables data propagation for limited operators to perform shape computation. + + Returns: + (ModelProto) model with inferred shape information + """ + if isinstance(model, (ModelProto, bytes)): + model_str = model if isinstance(model, bytes) else model.SerializeToString() + inferred_model_str = C.infer_shapes( + model_str, check_type, strict_mode, data_prop + ) + return onnx.load_from_string(inferred_model_str) + if isinstance(model, str): + raise TypeError( + "infer_shapes only accepts ModelProto or bytes," + "you can use infer_shapes_path for the model path (String)." 
+ ) + + raise TypeError( + f"infer_shapes only accepts ModelProto or bytes, incorrect type: {type(model)}" + ) + + +def infer_shapes_path( + model_path: str | os.PathLike, + output_path: str | os.PathLike = "", + check_type: bool = False, + strict_mode: bool = False, + data_prop: bool = False, +) -> None: + """Take model path for shape_inference. + + This function is the same as :func:`infer_shape` but supports >2GB models. + The function outputs the inferred model to the `output_path`. The original model path + is used if not specified. + """ + if isinstance(model_path, ModelProto): + raise TypeError( + "infer_shapes_path only accepts model Path (String)," + "you can use infer_shapes for the ModelProto." + ) + try: + model_path = os.fspath(model_path) + except TypeError as exp: + raise TypeError( + "infer_shapes_path only accepts model path as a string or PathLike, " + f"incorrect model path type: {type(model_path)}" + ) from exp + try: + output_path = os.fspath(output_path) + except TypeError as exp: + raise TypeError( + "infer_shapes_path only accepts output path as a string or PathLike, " + f"incorrect output path type: {type(output_path)}" + ) from exp + + if output_path == "": + output_path = model_path + C.infer_shapes_path(model_path, output_path, check_type, strict_mode, data_prop) + + +def infer_node_outputs( + schema: onnx.defs.OpSchema, + node: onnx.NodeProto, + input_types: dict[str, onnx.TypeProto], + input_data: dict[str, onnx.TensorProto] | None = None, + input_sparse_data: dict[str, onnx.SparseTensorProto] | None = None, + opset_imports: list[onnx.OperatorSetIdProto] | None = None, + ir_version: int = onnx.IR_VERSION, +) -> dict[str, onnx.TypeProto]: + if not schema.has_type_and_shape_inference_function: + return {} + if input_data is None: + input_data = {} + if input_sparse_data is None: + input_sparse_data = {} + if opset_imports is None: + passed_opset_imports = {} + else: + passed_opset_imports = {opset.domain: opset.version for opset in 
opset_imports} + + # catch KeyError if node's input does not exist in input_types + passed_input_types = { + key: input_types[key].SerializeToString() for key in node.input if key != "" + } + # input_types will also be used as outer_scope_value_types so do not filter by node's input here + for key, value in input_types.items(): + if key not in passed_input_types: + passed_input_types[key] = value.SerializeToString() + passed_input_data = { + key: input_data[key].SerializeToString() + for key in node.input + if key in input_data + } + passed_sparse_input_data = { + key: input_sparse_data[key].SerializeToString() + for key in node.input + if key in input_sparse_data + } + + outputs = schema._infer_node_outputs( + node.SerializeToString(), + passed_input_types, + passed_input_data, + passed_sparse_input_data, + passed_opset_imports, + ir_version, + ) # type: ignore[call-arg] + return {key: onnx.TypeProto.FromString(out) for key, out in outputs.items()} + + +def infer_function_output_types( + function: FunctionProto, + input_types: Sequence[TypeProto], + attributes: Sequence[AttributeProto], +) -> list[TypeProto]: + """Apply type-and-shape-inference to given function body, with given input types + and given input attribute values. 
+ """ + result = C.infer_function_output_types( + function.SerializeToString(), + [x.SerializeToString() for x in input_types], + [x.SerializeToString() for x in attributes], + ) + + def to_type_proto(x) -> TypeProto: + type_proto = onnx.TypeProto() + type_proto.ParseFromString(x) + return type_proto + + return [to_type_proto(x) for x in result] + + +InferenceError = C.InferenceError diff --git a/pythonProject/.venv/Lib/site-packages/onnx/string_utils.h b/pythonProject/.venv/Lib/site-packages/onnx/string_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..a8b56ccd92cca0a006c1ec9500258a22dbc8d34f --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/string_utils.h @@ -0,0 +1,61 @@ +// Copyright (c) ONNX Project Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include + +namespace ONNX_NAMESPACE { + +#if defined(__ANDROID__) +template +std::string to_string(T value) { + std::ostringstream os; + os << value; + return os.str(); +} + +inline int stoi(const std::string& str) { + std::stringstream ss; + int n = 0; + ss << str; + ss >> n; + return n; +} + +#else +using std::stoi; +using std::to_string; +#endif // defined(__ANDROID__) + +inline void MakeStringInternal(std::stringstream& /*ss*/) {} + +template +inline void MakeStringInternal(std::stringstream& ss, const T& t) { + ss << t; +} + +template +inline void MakeStringInternal(std::stringstream& ss, const T& t, const Args&... args) { + MakeStringInternal(ss, t); + MakeStringInternal(ss, args...); +} + +template +std::string MakeString(const Args&... args) { + std::stringstream ss; + MakeStringInternal(ss, args...); + return std::string(ss.str()); +} + +// Specializations for already-a-string types. 
+template <> +inline std::string MakeString(const std::string& str) { + return str; +} +inline std::string MakeString(const char* c_str) { + return std::string(c_str); +} +} // namespace ONNX_NAMESPACE diff --git a/pythonProject/.venv/Lib/site-packages/onnx/subbyte.py b/pythonProject/.venv/Lib/site-packages/onnx/subbyte.py new file mode 100644 index 0000000000000000000000000000000000000000..c997331190075e996a32edc1846a4062586f72f1 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/subbyte.py @@ -0,0 +1,168 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import typing + +import numpy as np +import numpy.typing as npt +import typing_extensions + +if typing.TYPE_CHECKING: + from collections.abc import Sequence + +INT4_MIN = -8 +INT4_MAX = 7 +UINT4_MIN = 0 +UINT4_MAX = 15 + + +@typing_extensions.deprecated( + "Deprecated since 1.18. Scheduled to remove in 1.20. Consider using libraries like ml_dtypes for dtype conversion", + category=DeprecationWarning, +) +def float32_to_4bit_unpacked(x: np.ndarray | float, signed: bool) -> np.ndarray: + """Cast to 4bit via rounding and clipping (without packing). + + Args: + x: element to be converted + signed: boolean, whether to convert to signed int4. + + Returns: + An ndarray with a single int4 element (sign-extended to int8/uint8) + """ + dtype = np.int8 if signed else np.uint8 + clip_low = INT4_MIN if signed else UINT4_MIN + clip_high = INT4_MAX if signed else UINT4_MAX + if not isinstance(x, np.ndarray): + x = np.asarray(x) + + clipped = np.clip(x, clip_low, clip_high) + return np.rint(clipped).astype(dtype) # type: ignore[no-any-return] + + +@typing_extensions.deprecated( + "Deprecated since 1.18. Scheduled to remove in 1.20. 
Consider using libraries like ml_dtypes for dtype conversion", + category=DeprecationWarning, +) +def float32x2_to_4bitx2( + val_low: np.dtype, val_high: np.dtype, signed: bool +) -> np.ndarray: + """Cast two elements to 4bit (via rounding and clipping) and pack + to a single byte + Args: + val_low: element to be packed in the 4 LSB + val_high: element to be packed in the 4 MSB + signed: boolean, whether to convert to signed int4. + + Returns: + An ndarray with a single int8/uint8 element, containing both int4 elements + """ + i8_high = float32_to_4bit_unpacked(val_high, signed) # type: ignore[arg-type] + i8_low = float32_to_4bit_unpacked(val_low, signed) # type: ignore[arg-type] + return i8_high << 4 | i8_low & 0x0F + + +@typing_extensions.deprecated( + "Deprecated since 1.18. Scheduled to remove in 1.20. Consider using libraries like ml_dtypes for dtype conversion", + category=DeprecationWarning, +) +def unpack_4bitx2( + x: npt.NDArray[np.uint8], dims: int | Sequence[int] +) -> npt.NDArray[np.uint8]: + """Unpack an array of packed uint8 elements (4bitx2) into individual elements + (still represented as uint8) + + Args: + x: Input data + dims: The shape of the output array. + + Returns: + A array containing unpacked 4-bit elements (as int8/uint8) + """ + res = np.empty([x.size * 2], dtype=np.uint8) + x_low = x & np.uint8(0x0F) + x_high = x & np.uint8(0xF0) + x_high >>= np.uint8(4) + res[0::2] = x_low + res[1::2] = x_high + if ( + res.size == np.prod(dims) + 1 + ): # handle single-element padding due to odd number of elements + res = res.ravel()[:-1] + res = res.reshape(dims) + return res + + +@typing_extensions.deprecated( + "Deprecated since 1.18. Scheduled to remove in 1.20. 
Consider using libraries like ml_dtypes for dtype conversion", + category=DeprecationWarning, +) +def unpack_single_4bitx2( + x: np.ndarray | np.dtype | float, signed: bool +) -> tuple[np.ndarray, np.ndarray]: + def unpack_signed(x): + return np.where((x >> 3) == 0, x, x | 0xF0) + + """Unpack a single byte 4bitx2 to two 4 bit elements + Args: + x: Input data + signed: boolean, whether to interpret as signed int4. + Returns: + A tuple of ndarrays containing int4 elements (sign-extended to int8/uint8) + """ + if not isinstance(x, np.ndarray): + x = np.asarray(x) + x_low = x & 0x0F + x_high = x >> 4 + x_low = unpack_signed(x_low) if signed else x_low + x_high = unpack_signed(x_high) if signed else x_high + dtype = np.int8 if signed else np.uint8 + return (x_low.astype(dtype), x_high.astype(dtype)) + + +@typing_extensions.deprecated( + "Deprecated since 1.18. Scheduled to remove in 1.20. Consider using libraries like ml_dtypes for dtype conversion", + category=DeprecationWarning, +) +def float32_to_float4e2m1_unpacked(values: np.ndarray) -> np.ndarray: + """Cast float32 to float4e2m1 (without packing). + + Args: + values: element or array to be converted + + Returns: + An ndarray with unpacked float4e2m1 elements (as uint8) + """ + sign = np.where(np.signbit(values), 0x8, 0x0).astype(np.uint8) + magnitude = np.abs(values) + res = np.zeros(values.shape, dtype=np.uint8) + res[(magnitude > 0.25) & (magnitude < 0.75)] = 0x1 # noqa: PLR2004 + res[(magnitude >= 0.75) & (magnitude <= 1.25)] = 0x2 # noqa: PLR2004 + res[(magnitude > 1.25) & (magnitude < 1.75)] = 0x3 # noqa: PLR2004 + res[(magnitude >= 1.75) & (magnitude <= 2.5)] = 0x4 # noqa: PLR2004 + res[(magnitude > 2.5) & (magnitude < 3.5)] = 0x5 # noqa: PLR2004 + res[(magnitude >= 3.5) & (magnitude <= 5.0)] = 0x6 # noqa: PLR2004 + res[magnitude > 5.0] = 0x7 # noqa: PLR2004 + res |= sign + res[np.isnan(values)] = 0x7 + return res + + +@typing_extensions.deprecated( + "Deprecated since 1.18. Scheduled to remove in 1.20. 
Consider using libraries like ml_dtypes for dtype conversion", + category=DeprecationWarning, +) +def float32x2_to_float4e2m1x2(val_low: np.ndarray, val_high: np.ndarray) -> np.ndarray: + """Cast two elements to float4e2m1 and pack to a single byte + Args: + val_low: element to be packed in the 4 LSB + val_high: element to be packed in the 4 MSB + + Returns: + An ndarray with uint8 elements, containing both float4e2m1 elements + """ + i8_high = float32_to_float4e2m1_unpacked(val_high) + i8_low = float32_to_float4e2m1_unpacked(val_low) + return i8_high << 4 | i8_low & 0x0F diff --git a/pythonProject/.venv/Lib/site-packages/onnx/utils.py b/pythonProject/.venv/Lib/site-packages/onnx/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6d4eba55233a27c1f5569de4ea0e2df652a9cefd --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/utils.py @@ -0,0 +1,321 @@ +# Copyright (c) ONNX Project Contributors +# +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import os +import tarfile +from collections import deque +from typing import TYPE_CHECKING + +import onnx.checker +import onnx.helper +import onnx.shape_inference + +if TYPE_CHECKING: + from onnx.onnx_pb import ( + FunctionProto, + ModelProto, + NodeProto, + TensorProto, + ValueInfoProto, + ) + + +class Extractor: + def __init__(self, model: ModelProto) -> None: + self.model = model + self.graph = self.model.graph + self.initializers: dict[str, TensorProto] = self._build_name2obj_dict( + self.graph.initializer + ) + self.value_infos: dict[str, ValueInfoProto] = self._build_name2obj_dict( + self.graph.value_info + ) + # Add input and output values (not included in the value_info for intermediate values) + self.value_infos.update(self._build_name2obj_dict(self.graph.input)) + self.value_infos.update(self._build_name2obj_dict(self.graph.output)) + self.outmap: dict[str, int] = self._build_output_dict(self.graph) + + @staticmethod + def _build_name2obj_dict(objs) -> 
dict: + return {obj.name: obj for obj in objs} + + @staticmethod + def _build_output_dict(graph) -> dict[str, int]: + output_to_index: dict[str, int] = {} + for index, node in enumerate(graph.node): + for output_name in node.output: + if output_name == "": + continue + assert output_name not in output_to_index # output_name is unique + output_to_index[output_name] = index + return output_to_index + + def _collect_new_io(self, io_names_to_extract: list[str]) -> list[ValueInfoProto]: + # Validate that all names exist in self.value_infos + missing_names = [ + name for name in io_names_to_extract if name not in self.value_infos + ] + if missing_names: + raise ValueError( + f"The following names were not found in value_infos: {', '.join(missing_names)}" + ) + return [self.value_infos[name] for name in io_names_to_extract] + + def _dfs_search_reachable_nodes( + self, + node_output_name: str, + graph_input_names: set[str], + reachable: set[int], + ) -> None: + """Helper function to find nodes which are connected to an output + + Arguments: + node_output_name (str): The name of the output + graph_input_names (set of string): The names of all inputs of the graph + reachable (set of int): The set of indexes to reachable nodes in `nodes` + """ + stack = [node_output_name] + while stack: + current_output_name = stack.pop() + # finish search at inputs + if current_output_name in graph_input_names: + continue + # find nodes connected to this output + if current_output_name in self.outmap: + index = self.outmap[current_output_name] + if index not in reachable: + # add nodes connected to this output to sets + reachable.add(index) + stack += [ + input_name + for input_name in self.graph.node[index].input + if input_name != "" + ] + + def _collect_reachable_nodes( + self, + input_names: list[str], + output_names: list[str], + ) -> list[NodeProto]: + _input_names = set(input_names) + reachable: set[int] = set() + for name in output_names: + self._dfs_search_reachable_nodes(name, 
_input_names, reachable) + # needs to be topologically sorted + return [self.graph.node[index] for index in sorted(reachable)] + + def _collect_referred_local_functions( + self, + nodes: list[NodeProto], + ) -> list[FunctionProto]: + # a node in a model graph may refer a function. + # a function contains nodes, some of which may in turn refer a function. + # we need to find functions referred by graph nodes and + # by nodes used to define functions. + function_map: dict[tuple[str, str], FunctionProto] = {} + for function in self.model.functions: + function_map[(function.name, function.domain)] = function + referred_local_functions: list[FunctionProto] = [] + queue = deque(nodes) + while queue: + node = queue.popleft() + # check if the node is a function op + if (node.op_type, node.domain) in function_map: + function = function_map.pop((node.op_type, node.domain)) + referred_local_functions.append(function) + queue.extend(function.node) + # needs to be topologically sorted + return referred_local_functions + + def _collect_reachable_tensors( + self, + nodes: list[NodeProto], + ) -> tuple[list[TensorProto], list[ValueInfoProto]]: + all_tensors_names: set[str] = set() + for node in nodes: + all_tensors_names.update(node.input) + all_tensors_names.update(node.output) + initializer = [ + self.initializers[t] for t in self.initializers if t in all_tensors_names + ] + value_info = [ + self.value_infos[t] for t in self.value_infos if t in all_tensors_names + ] + len_sparse_initializer = len(self.graph.sparse_initializer) + if len_sparse_initializer != 0: + raise ValueError( + f"len_sparse_initializer is {len_sparse_initializer}, it must be 0." + ) + len_quantization_annotation = len(self.graph.quantization_annotation) + if len_quantization_annotation != 0: + raise ValueError( + f"len_quantization_annotation is {len_quantization_annotation}, it must be 0." 
+ ) + return initializer, value_info + + def _make_model( + self, + nodes: list[NodeProto], + inputs: list[ValueInfoProto], + outputs: list[ValueInfoProto], + initializer: list[TensorProto], + value_info: list[ValueInfoProto], + local_functions: list[FunctionProto], + ) -> ModelProto: + name = "Extracted from {" + self.graph.name + "}" + graph = onnx.helper.make_graph( + nodes, name, inputs, outputs, initializer=initializer, value_info=value_info + ) + meta = { + "ir_version": self.model.ir_version, + "opset_imports": self.model.opset_import, + "producer_name": "onnx.utils.extract_model", + "functions": local_functions, + } + return onnx.helper.make_model(graph, **meta) + + def extract_model( + self, + input_names: list[str], + output_names: list[str], + ) -> ModelProto: + inputs = self._collect_new_io(input_names) + outputs = self._collect_new_io(output_names) + nodes = self._collect_reachable_nodes(input_names, output_names) + initializer, value_info = self._collect_reachable_tensors(nodes) + local_functions = self._collect_referred_local_functions(nodes) + model = self._make_model( + nodes, inputs, outputs, initializer, value_info, local_functions + ) + return model + + +def extract_model( + input_path: str | os.PathLike, + output_path: str | os.PathLike, + input_names: list[str], + output_names: list[str], + check_model: bool = True, + infer_shapes: bool = True, +) -> None: + """Extracts sub-model from an ONNX model. + + The sub-model is defined by the names of the input and output tensors *exactly*. + + Note: For control-flow operators, e.g. If and Loop, the _boundary of sub-model_, + which is defined by the input and output tensors, should not _cut through_ the + subgraph that is connected to the _main graph_ as attributes of these operators. + + Note: When the extracted model size is larger than 2GB, the extra data will be saved in "output_path.data". + + Arguments: + input_path (str | os.PathLike): The path to original ONNX model. 
def extract_model(
    input_path: str | os.PathLike,
    output_path: str | os.PathLike,
    input_names: list[str],
    output_names: list[str],
    check_model: bool = True,
    infer_shapes: bool = True,
) -> None:
    """Extracts sub-model from an ONNX model.

    The sub-model is defined by the names of the input and output tensors *exactly*.

    Note: For control-flow operators, e.g. If and Loop, the _boundary of sub-model_,
    which is defined by the input and output tensors, should not _cut through_ the
    subgraph that is connected to the _main graph_ as attributes of these operators.

    Note: When the extracted model size is larger than 2GB, the extra data will be
    saved in "output_path.data".

    Arguments:
        input_path (str | os.PathLike): The path to original ONNX model.
        output_path (str | os.PathLike): The path to save the extracted ONNX model.
        input_names (list of string): The names of the input tensors that to be extracted.
        output_names (list of string): The names of the output tensors that to be extracted.
        check_model (bool): Whether to run model checker on the original model and the extracted model.
        infer_shapes (bool): Whether to infer the shapes of the original model.
    """
    # Argument validation; each failure mode has its own message.
    if not os.path.exists(input_path):
        raise ValueError(f"Invalid input model path: {input_path}")
    if not output_path:
        raise ValueError("Output model path shall not be empty!")
    if not input_names:
        raise ValueError("Input tensor names shall not be empty!")
    if not output_names:
        raise ValueError("Output tensor names shall not be empty!")
    if len(set(input_names)) != len(input_names):
        raise ValueError("Duplicate names found in the input tensor names.")
    if len(set(output_names)) != len(output_names):
        raise ValueError("Duplicate names found in the output tensor names.")

    if check_model:
        onnx.checker.check_model(input_path)

    if not infer_shapes:
        model = onnx.load(input_path)
    elif os.path.getsize(input_path) > onnx.checker.MAXIMUM_PROTOBUF:
        # Too large for in-memory shape inference: write the inferred model
        # to output_path on disk, then reload it.
        onnx.shape_inference.infer_shapes_path(input_path, output_path)
        model = onnx.load(output_path)
    else:
        # Infer shapes on the proto alone, then pull in external tensor data.
        model = onnx.load(input_path, load_external_data=False)
        model = onnx.shape_inference.infer_shapes(model)
        onnx.load_external_data_for_model(model, os.path.dirname(input_path))

    extracted = Extractor(model).extract_model(input_names, output_names)

    if extracted.ByteSize() > onnx.checker.MAXIMUM_PROTOBUF:
        # Over the protobuf size limit: externalize tensor data alongside the model.
        onnx.save(
            extracted,
            output_path,
            save_as_external_data=True,
            location=os.path.basename(output_path) + ".data",
        )
    else:
        onnx.save(extracted, output_path)

    if check_model:
        onnx.checker.check_model(output_path)
list[tarfile.TarInfo]: + """Check that the content of ``tar`` will be extracted safely + + Args: + tar: The tarball file + base: The directory where the tarball will be extracted + + Returns: + list of tarball members + """ + result = [] + for member in tar: + member_path = os.path.join(base, member.name) + abs_base = os.path.abspath(base) + abs_member = os.path.abspath(member_path) + if not abs_member.startswith(abs_base): + raise RuntimeError( + f"The tarball member {member_path} in downloading model contains " + f"directory traversal sequence which may contain harmful payload." + ) + if member.issym() or member.islnk(): + raise RuntimeError( + f"The tarball member {member_path} in downloading model contains " + f"symbolic links which may contain harmful payload." + ) + result.append(member) + return result + + +def _extract_model_safe( + model_tar_path: str | os.PathLike, local_model_with_data_dir_path: str | os.PathLike +) -> None: + """Safely extracts a tar file to a specified directory. + + This function ensures that the extraction process mitigates against + directory traversal vulnerabilities by validating or sanitizing paths + within the tar file. It also provides compatibility for different versions + of the tarfile module by checking for the availability of certain attributes + or methods before invoking them. + + Args: + model_tar_path: The path to the tar file to be extracted. + local_model_with_data_dir_path: The directory path where the tar file + contents will be extracted to. 
+ """ + with tarfile.open(model_tar_path) as model_with_data_zipped: + # Mitigate tarball directory traversal risks + if hasattr(tarfile, "data_filter"): + model_with_data_zipped.extractall( + path=local_model_with_data_dir_path, filter="data" + ) + else: + model_with_data_zipped.extractall( + path=local_model_with_data_dir_path, + members=_tar_members_filter( + model_with_data_zipped, local_model_with_data_dir_path + ), + ) diff --git a/pythonProject/.venv/Lib/site-packages/onnx/version.py b/pythonProject/.venv/Lib/site-packages/onnx/version.py new file mode 100644 index 0000000000000000000000000000000000000000..ea5d18012b75465dea41ddb96f16be0a67f21689 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/version.py @@ -0,0 +1,5 @@ +# This file is generated by setup.py. DO NOT EDIT! + + +version = "1.19.0" +git_version = "57b9c6a4f6eebb09ae1b34cfb632078518f72832" diff --git a/pythonProject/.venv/Lib/site-packages/onnx/version_converter.py b/pythonProject/.venv/Lib/site-packages/onnx/version_converter.py new file mode 100644 index 0000000000000000000000000000000000000000..40b4cab5955d41a03deb0b57ef99c9bc0f4380e1 --- /dev/null +++ b/pythonProject/.venv/Lib/site-packages/onnx/version_converter.py @@ -0,0 +1,43 @@ +# Copyright (c) ONNX Project Contributors + +# SPDX-License-Identifier: Apache-2.0 +"""onnx version converter + +This enables users to convert their models between different opsets within the +default domain ("" or "ai.onnx"). +""" + +from __future__ import annotations + +import onnx +import onnx.onnx_cpp2py_export.version_converter as C # noqa: N812 +from onnx import ModelProto + + +def convert_version(model: ModelProto, target_version: int) -> ModelProto: + """Convert opset version of the ModelProto. + + Arguments: + model: Model. + target_version: Target opset version. + + Returns: + Converted model. + + Raises: + RuntimeError when some necessary conversion is not supported. 
def convert_version(model: ModelProto, target_version: int) -> ModelProto:
    """Convert opset version of the ModelProto.

    Arguments:
        model: Model.
        target_version: Target opset version.

    Returns:
        Converted model.

    Raises:
        RuntimeError when some necessary conversion is not supported.
    """
    # Validate argument types before handing off to the C++ converter.
    if not isinstance(model, ModelProto):
        raise TypeError(
            f"VersionConverter only accepts ModelProto as model, incorrect type: {type(model)}"
        )
    if not isinstance(target_version, int):
        raise TypeError(
            f"VersionConverter only accepts int as target_version, incorrect type: {type(target_version)}"
        )
    # The converter operates on serialized protos, so round-trip through bytes.
    serialized = model.SerializeToString()
    converted_bytes = C.convert_version(serialized, target_version)
    return onnx.load_from_string(converted_bytes)


# Re-export the C++ converter's error type for callers to catch.
ConvertError = C.ConvertError