diff --git a/wemm/lib/python3.10/site-packages/GPUtil-1.4.0.dist-info/top_level.txt b/wemm/lib/python3.10/site-packages/GPUtil-1.4.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..364bc32da47197fb5a1f51f705b57090509499b4
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/GPUtil-1.4.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+GPUtil
diff --git a/wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/INSTALLER b/wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/RECORD b/wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..0bb6eb12d10a7ced3414e4dd3559a4c664e320c9
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/RECORD
@@ -0,0 +1,11 @@
+aiosignal-1.3.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+aiosignal-1.3.2.dist-info/LICENSE,sha256=b9UkPpLdf5jsacesN3co50kFcJ_1J6W_mNbQJjwE9bY,11332
+aiosignal-1.3.2.dist-info/METADATA,sha256=TeI_xgZ191qgx37rviEnpMWC0QnYsg_j9EGVivNqqjc,3753
+aiosignal-1.3.2.dist-info/RECORD,,
+aiosignal-1.3.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+aiosignal-1.3.2.dist-info/WHEEL,sha256=pxeNX5JdtCe58PUSYP9upmc7jdRPgvT0Gm9kb1SHlVw,109
+aiosignal-1.3.2.dist-info/top_level.txt,sha256=z45aNOKGDdrI1roqZY3BGXQ22kJFPHBmVdwtLYLtXC0,10
+aiosignal/__init__.py,sha256=1oIrRl6kNpqFh32e7HfMFbMV_35v8sqJJFfnuKgmtEU,867
+aiosignal/__init__.pyi,sha256=xeCddYSS8fZAkz8S4HuKSR2IDe3N7RW_LKcXDPPA1Xk,311
+aiosignal/__pycache__/__init__.cpython-310.pyc,,
+aiosignal/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
diff --git a/wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/REQUESTED b/wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/WHEEL b/wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..104f3874635f24f0d2918dfeaf6a59652274460c
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: setuptools (75.6.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/wemm/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu11-11.7.99.dist-info/INSTALLER b/wemm/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu11-11.7.99.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu11-11.7.99.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/wemm/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu11-11.7.99.dist-info/REQUESTED b/wemm/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu11-11.7.99.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/wemm/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu11-11.7.99.dist-info/top_level.txt b/wemm/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu11-11.7.99.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..862f7abf232cdfbb928609856247292e81c9decb
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu11-11.7.99.dist-info/top_level.txt
@@ -0,0 +1 @@
+nvidia
diff --git a/wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/License.txt b/wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/License.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b491c70e0aef319022ded661e111ddbd45b8a17f
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/License.txt
@@ -0,0 +1,1568 @@
+End User License Agreement
+--------------------------
+
+
+Preface
+-------
+
+The Software License Agreement in Chapter 1 and the Supplement
+in Chapter 2 contain license terms and conditions that govern
+the use of NVIDIA software. By accepting this agreement, you
+agree to comply with all the terms and conditions applicable
+to the product(s) included herein.
+
+
+NVIDIA Driver
+
+
+Description
+
+This package contains the operating system driver and
+fundamental system software components for NVIDIA GPUs.
+
+
+NVIDIA CUDA Toolkit
+
+
+Description
+
+The NVIDIA CUDA Toolkit provides command-line and graphical
+tools for building, debugging and optimizing the performance
+of applications accelerated by NVIDIA GPUs, runtime and math
+libraries, and documentation including programming guides,
+user manuals, and API references.
+
+
+Default Install Location of CUDA Toolkit
+
+Windows platform:
+
+%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#
+
+Linux platform:
+
+/usr/local/cuda-#.#
+
+Mac platform:
+
+/Developer/NVIDIA/CUDA-#.#
+
+
+NVIDIA CUDA Samples
+
+
+Description
+
+This package includes over 100 CUDA examples that demonstrate
+various CUDA programming principles, and efficient CUDA
+implementation of algorithms in specific application domains.
+
+
+Default Install Location of CUDA Samples
+
+Windows platform:
+
+%ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#
+
+Linux platform:
+
+/usr/local/cuda-#.#/samples
+
+and
+
+$HOME/NVIDIA_CUDA-#.#_Samples
+
+Mac platform:
+
+/Developer/NVIDIA/CUDA-#.#/samples
+
+
+NVIDIA Nsight Visual Studio Edition (Windows only)
+
+
+Description
+
+NVIDIA Nsight Development Platform, Visual Studio Edition is a
+development environment integrated into Microsoft Visual
+Studio that provides tools for debugging, profiling, analyzing
+and optimizing your GPU computing and graphics applications.
+
+
+Default Install Location of Nsight Visual Studio Edition
+
+Windows platform:
+
+%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#
+
+
+1. License Agreement for NVIDIA Software Development Kits
+---------------------------------------------------------
+
+
+Release Date: July 26, 2018
+---------------------------
+
+
+Important Notice - Read before downloading, installing,
+copying or using the licensed software:
+-------------------------------------------------------
+
+This license agreement, including exhibits attached
+(“Agreement”) is a legal agreement between you and NVIDIA
+Corporation ("NVIDIA") and governs your use of a NVIDIA
+software development kit (“SDK”).
+ +Each SDK has its own set of software and materials, but here +is a description of the types of items that may be included in +a SDK: source code, header files, APIs, data sets and assets +(examples include images, textures, models, scenes, videos, +native API input/output files), binary software, sample code, +libraries, utility programs, programming code and +documentation. + +This Agreement can be accepted only by an adult of legal age +of majority in the country in which the SDK is used. + +If you are entering into this Agreement on behalf of a company +or other legal entity, you represent that you have the legal +authority to bind the entity to this Agreement, in which case +“you” will mean the entity you represent. + +If you don’t have the required age or authority to accept +this Agreement, or if you don’t accept all the terms and +conditions of this Agreement, do not download, install or use +the SDK. + +You agree to use the SDK only for purposes that are permitted +by (a) this Agreement, and (b) any applicable law, regulation +or generally accepted practices or guidelines in the relevant +jurisdictions. + + +1.1. License + + +1.1.1. License Grant + +Subject to the terms of this Agreement, NVIDIA hereby grants +you a non-exclusive, non-transferable license, without the +right to sublicense (except as expressly provided in this +Agreement) to: + + 1. Install and use the SDK, + + 2. Modify and create derivative works of sample source code + delivered in the SDK, and + + 3. Distribute those portions of the SDK that are identified + in this Agreement as distributable, as incorporated in + object code format into a software application that meets + the distribution requirements indicated in this Agreement. + + +1.1.2. Distribution Requirements + +These are the distribution requirements for you to exercise +the distribution grant: + + 1. Your application must have material additional + functionality, beyond the included portions of the SDK. + + 2. The distributable portions of the SDK shall only be + accessed by your application. + + 3. The following notice shall be included in modifications + and derivative works of sample source code distributed: + “This software contains source code provided by NVIDIA + Corporation.” + + 4. Unless a developer tool is identified in this Agreement + as distributable, it is delivered for your internal use + only. + + 5. The terms under which you distribute your application + must be consistent with the terms of this Agreement, + including (without limitation) terms relating to the + license grant and license restrictions and protection of + NVIDIA’s intellectual property rights. Additionally, you + agree that you will protect the privacy, security and + legal rights of your application users. + + 6. You agree to notify NVIDIA in writing of any known or + suspected distribution or use of the SDK not in compliance + with the requirements of this Agreement, and to enforce + the terms of your agreements with respect to distributed + SDK. + + +1.1.3. Authorized Users + +You may allow employees and contractors of your entity or of +your subsidiary(ies) to access and use the SDK from your +secure network to perform work on your behalf. + +If you are an academic institution you may allow users +enrolled or employed by the academic institution to access and +use the SDK from your secure network. + +You are responsible for the compliance with the terms of this +Agreement by your authorized users. 
If you become aware that +your authorized users didn’t follow the terms of this +Agreement, you agree to take reasonable steps to resolve the +non-compliance and prevent new occurrences. + + +1.1.4. Pre-Release SDK + +The SDK versions identified as alpha, beta, preview or +otherwise as pre-release, may not be fully functional, may +contain errors or design flaws, and may have reduced or +different security, privacy, accessibility, availability, and +reliability standards relative to commercial versions of +NVIDIA software and materials. Use of a pre-release SDK may +result in unexpected results, loss of data, project delays or +other unpredictable damage or loss. + +You may use a pre-release SDK at your own risk, understanding +that pre-release SDKs are not intended for use in production +or business-critical systems. + +NVIDIA may choose not to make available a commercial version +of any pre-release SDK. NVIDIA may also choose to abandon +development and terminate the availability of a pre-release +SDK at any time without liability. + + +1.1.5. Updates + +NVIDIA may, at its option, make available patches, workarounds +or other updates to this SDK. Unless the updates are provided +with their separate governing terms, they are deemed part of +the SDK licensed to you as provided in this Agreement. You +agree that the form and content of the SDK that NVIDIA +provides may change without prior notice to you. While NVIDIA +generally maintains compatibility between versions, NVIDIA may +in some cases make changes that introduce incompatibilities in +future versions of the SDK. + + +1.1.6. Third Party Licenses + +The SDK may come bundled with, or otherwise include or be +distributed with, third party software licensed by a NVIDIA +supplier and/or open source software provided under an open +source license. Use of third party software is subject to the +third-party license terms, or in the absence of third party +terms, the terms of this Agreement. Copyright to third party +software is held by the copyright holders indicated in the +third-party software or license. + + +1.1.7. Reservation of Rights + +NVIDIA reserves all rights, title, and interest in and to the +SDK, not expressly granted to you under this Agreement. + + +1.2. Limitations + +The following license limitations apply to your use of the +SDK: + + 1. You may not reverse engineer, decompile or disassemble, + or remove copyright or other proprietary notices from any + portion of the SDK or copies of the SDK. + + 2. Except as expressly provided in this Agreement, you may + not copy, sell, rent, sublicense, transfer, distribute, + modify, or create derivative works of any portion of the + SDK. For clarity, you may not distribute or sublicense the + SDK as a stand-alone product. + + 3. Unless you have an agreement with NVIDIA for this + purpose, you may not indicate that an application created + with the SDK is sponsored or endorsed by NVIDIA. + + 4. You may not bypass, disable, or circumvent any + encryption, security, digital rights management or + authentication mechanism in the SDK. + + 5. You may not use the SDK in any manner that would cause it + to become subject to an open source software license. As + examples, licenses that require as a condition of use, + modification, and/or distribution that the SDK be: + + a. Disclosed or distributed in source code form; + + b. Licensed for the purpose of making derivative works; + or + + c. Redistributable at no charge. + + 6. 
Unless you have an agreement with NVIDIA for this + purpose, you may not use the SDK with any system or + application where the use or failure of the system or + application can reasonably be expected to threaten or + result in personal injury, death, or catastrophic loss. + Examples include use in avionics, navigation, military, + medical, life support or other life critical applications. + NVIDIA does not design, test or manufacture the SDK for + these critical uses and NVIDIA shall not be liable to you + or any third party, in whole or in part, for any claims or + damages arising from such uses. + + 7. You agree to defend, indemnify and hold harmless NVIDIA + and its affiliates, and their respective employees, + contractors, agents, officers and directors, from and + against any and all claims, damages, obligations, losses, + liabilities, costs or debt, fines, restitutions and + expenses (including but not limited to attorney’s fees + and costs incident to establishing the right of + indemnification) arising out of or related to your use of + the SDK outside of the scope of this Agreement, or not in + compliance with its terms. + + +1.3. Ownership + + 1. NVIDIA or its licensors hold all rights, title and + interest in and to the SDK and its modifications and + derivative works, including their respective intellectual + property rights, subject to your rights described in this + section. This SDK may include software and materials from + NVIDIA’s licensors, and these licensors are intended + third party beneficiaries that may enforce this Agreement + with respect to their intellectual property rights. + + 2. You hold all rights, title and interest in and to your + applications and your derivative works of the sample + source code delivered in the SDK, including their + respective intellectual property rights, subject to + NVIDIA’s rights described in this section. + + 3. You may, but don’t have to, provide to NVIDIA + suggestions, feature requests or other feedback regarding + the SDK, including possible enhancements or modifications + to the SDK. For any feedback that you voluntarily provide, + you hereby grant NVIDIA and its affiliates a perpetual, + non-exclusive, worldwide, irrevocable license to use, + reproduce, modify, license, sublicense (through multiple + tiers of sublicensees), and distribute (through multiple + tiers of distributors) it without the payment of any + royalties or fees to you. NVIDIA will use feedback at its + choice. NVIDIA is constantly looking for ways to improve + its products, so you may send feedback to NVIDIA through + the developer portal at https://developer.nvidia.com. + + +1.4. No Warranties + +THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL +FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND +ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND +OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, +BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE +ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO +WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF +DEALING OR COURSE OF TRADE. + + +1.5. 
Limitation of Liability + +TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS +AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, +PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS +OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF +PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION +WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK, +WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH +OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), +PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF +LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES +TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS +AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE +NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS +LIMIT. + +These exclusions and limitations of liability shall apply +regardless if NVIDIA or its affiliates have been advised of +the possibility of such damages, and regardless of whether a +remedy fails its essential purpose. These exclusions and +limitations of liability form an essential basis of the +bargain between the parties, and, absent any of these +exclusions or limitations of liability, the provisions of this +Agreement, including, without limitation, the economic terms, +would be substantially different. + + +1.6. Termination + + 1. This Agreement will continue to apply until terminated by + either you or NVIDIA as described below. + + 2. If you want to terminate this Agreement, you may do so by + stopping to use the SDK. + + 3. NVIDIA may, at any time, terminate this Agreement if: + + a. (i) you fail to comply with any term of this + Agreement and the non-compliance is not fixed within + thirty (30) days following notice from NVIDIA (or + immediately if you violate NVIDIA’s intellectual + property rights); + + b. (ii) you commence or participate in any legal + proceeding against NVIDIA with respect to the SDK; or + + c. (iii) NVIDIA decides to no longer provide the SDK in + a country or, in NVIDIA’s sole discretion, the + continued use of it is no longer commercially viable. + + 4. Upon any termination of this Agreement, you agree to + promptly discontinue use of the SDK and destroy all copies + in your possession or control. Your prior distributions in + accordance with this Agreement are not affected by the + termination of this Agreement. Upon written request, you + will certify in writing that you have complied with your + commitments under this section. Upon any termination of + this Agreement all provisions survive except for the + license grant provisions. + + +1.7. General + +If you wish to assign this Agreement or your rights and +obligations, including by merger, consolidation, dissolution +or operation of law, contact NVIDIA to ask for permission. Any +attempted assignment not approved by NVIDIA in writing shall +be void and of no effect. NVIDIA may assign, delegate or +transfer this Agreement and its rights and obligations, and if +to a non-affiliate you will be notified. + +You agree to cooperate with NVIDIA and provide reasonably +requested information to verify your compliance with this +Agreement. + +This Agreement will be governed in all respects by the laws of +the United States and of the State of Delaware as those laws +are applied to contracts entered into and performed entirely +within Delaware by Delaware residents, without regard to the +conflicts of laws principles. 
The United Nations Convention on +Contracts for the International Sale of Goods is specifically +disclaimed. You agree to all terms of this Agreement in the +English language. + +The state or federal courts residing in Santa Clara County, +California shall have exclusive jurisdiction over any dispute +or claim arising out of this Agreement. Notwithstanding this, +you agree that NVIDIA shall still be allowed to apply for +injunctive remedies or an equivalent type of urgent legal +relief in any jurisdiction. + +If any court of competent jurisdiction determines that any +provision of this Agreement is illegal, invalid or +unenforceable, such provision will be construed as limited to +the extent necessary to be consistent with and fully +enforceable under the law and the remaining provisions will +remain in full force and effect. Unless otherwise specified, +remedies are cumulative. + +Each party acknowledges and agrees that the other is an +independent contractor in the performance of this Agreement. + +The SDK has been developed entirely at private expense and is +“commercial items” consisting of “commercial computer +software” and “commercial computer software +documentation” provided with RESTRICTED RIGHTS. Use, +duplication or disclosure by the U.S. Government or a U.S. +Government subcontractor is subject to the restrictions in +this Agreement pursuant to DFARS 227.7202-3(a) or as set forth +in subparagraphs (c)(1) and (2) of the Commercial Computer +Software - Restricted Rights clause at FAR 52.227-19, as +applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas +Expressway, Santa Clara, CA 95051. + +The SDK is subject to United States export laws and +regulations. You agree that you will not ship, transfer or +export the SDK into any country, or use the SDK in any manner, +prohibited by the United States Bureau of Industry and +Security or economic sanctions regulations administered by the +U.S. Department of Treasury’s Office of Foreign Assets +Control (OFAC), or any applicable export laws, restrictions or +regulations. These laws include restrictions on destinations, +end users and end use. By accepting this Agreement, you +confirm that you are not a resident or citizen of any country +currently embargoed by the U.S. and that you are not otherwise +prohibited from receiving the SDK. + +Any notice delivered by NVIDIA to you under this Agreement +will be delivered via mail, email or fax. You agree that any +notices that NVIDIA sends you electronically will satisfy any +legal communication requirements. Please direct your legal +notices or other correspondence to NVIDIA Corporation, 2788 +San Tomas Expressway, Santa Clara, California 95051, United +States of America, Attention: Legal Department. + +This Agreement and any exhibits incorporated into this +Agreement constitute the entire agreement of the parties with +respect to the subject matter of this Agreement and supersede +all prior negotiations or documentation exchanged between the +parties relating to this SDK license. Any additional and/or +conflicting terms on documents issued by you are null, void, +and invalid. Any amendment or waiver under this Agreement +shall be in writing and signed by representatives of both +parties. + + +2. 
CUDA Toolkit Supplement to Software License Agreement for +NVIDIA Software Development Kits +------------------------------------------------------------ + + +Release date: August 16, 2018 +----------------------------- + +The terms in this supplement govern your use of the NVIDIA +CUDA Toolkit SDK under the terms of your license agreement +(“Agreement”) as modified by this supplement. Capitalized +terms used but not defined below have the meaning assigned to +them in the Agreement. + +This supplement is an exhibit to the Agreement and is +incorporated as an integral part of the Agreement. In the +event of conflict between the terms in this supplement and the +terms in the Agreement, the terms in this supplement govern. + + +2.1. License Scope + +The SDK is licensed for you to develop applications only for +use in systems with NVIDIA GPUs. + + +2.2. Distribution + +The portions of the SDK that are distributable under the +Agreement are listed in Attachment A. + + +2.3. Operating Systems + +Those portions of the SDK designed exclusively for use on the +Linux or FreeBSD operating systems, or other operating systems +derived from the source code to these operating systems, may +be copied and redistributed for use in accordance with this +Agreement, provided that the object code files are not +modified in any way (except for unzipping of compressed +files). + + +2.4. Audio and Video Encoders and Decoders + +You acknowledge and agree that it is your sole responsibility +to obtain any additional third-party licenses required to +make, have made, use, have used, sell, import, and offer for +sale your products or services that include or incorporate any +third-party software and content relating to audio and/or +video encoders and decoders from, including but not limited +to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A., +MPEG-LA, and Coding Technologies. NVIDIA does not grant to you +under this Agreement any necessary patent or other rights with +respect to any audio and/or video encoders and decoders. + + +2.5. Licensing + +If the distribution terms in this Agreement are not suitable +for your organization, or for any questions regarding this +Agreement, please contact NVIDIA at +nvidia-compute-license-questions@nvidia.com. + + +2.6. 
Attachment A + +The following portions of the SDK are distributable under the +Agreement: + +Component + +CUDA Runtime + +Windows + +cudart.dll, cudart_static.lib, cudadevrt.lib + +Mac OSX + +libcudart.dylib, libcudart_static.a, libcudadevrt.a + +Linux + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Android + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Component + +CUDA FFT Library + +Windows + +cufft.dll, cufftw.dll, cufft.lib, cufftw.lib + +Mac OSX + +libcufft.dylib, libcufft_static.a, libcufftw.dylib, +libcufftw_static.a + +Linux + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Android + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Component + +CUDA BLAS Library + +Windows + +cublas.dll, cublasLt.dll + +Mac OSX + +libcublas.dylib, libcublasLt.dylib, libcublas_static.a, +libcublasLt_static.a + +Linux + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Android + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Component + +NVIDIA "Drop-in" BLAS Library + +Windows + +nvblas.dll + +Mac OSX + +libnvblas.dylib + +Linux + +libnvblas.so + +Component + +CUDA Sparse Matrix Library + +Windows + +cusparse.dll, cusparse.lib + +Mac OSX + +libcusparse.dylib, libcusparse_static.a + +Linux + +libcusparse.so, libcusparse_static.a + +Android + +libcusparse.so, libcusparse_static.a + +Component + +CUDA Linear Solver Library + +Windows + +cusolver.dll, cusolver.lib + +Mac OSX + +libcusolver.dylib, libcusolver_static.a + +Linux + +libcusolver.so, libcusolver_static.a + +Android + +libcusolver.so, libcusolver_static.a + +Component + +CUDA Random Number Generation Library + +Windows + +curand.dll, curand.lib + +Mac OSX + +libcurand.dylib, libcurand_static.a + +Linux + +libcurand.so, libcurand_static.a + +Android + +libcurand.so, libcurand_static.a + +Component + +CUDA Accelerated Graph Library + +Component + +NVIDIA Performance Primitives Library + +Windows + +nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll, +nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll, +nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib, +nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll, +nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib + +Mac OSX + +libnppc.dylib, libnppc_static.a, libnppial.dylib, +libnppial_static.a, libnppicc.dylib, libnppicc_static.a, +libnppicom.dylib, libnppicom_static.a, libnppidei.dylib, +libnppidei_static.a, libnppif.dylib, libnppif_static.a, +libnppig.dylib, libnppig_static.a, libnppim.dylib, +libnppisu_static.a, libnppitc.dylib, libnppitc_static.a, +libnpps.dylib, libnpps_static.a + +Linux + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Android + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Component + +NVIDIA JPEG Library + +Linux + +libnvjpeg.so, 
libnvjpeg_static.a + +Component + +Internal common library required for statically linking to +cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP + +Mac OSX + +libculibos.a + +Linux + +libculibos.a + +Component + +NVIDIA Runtime Compilation Library and Header + +All + +nvrtc.h + +Windows + +nvrtc.dll, nvrtc-builtins.dll + +Mac OSX + +libnvrtc.dylib, libnvrtc-builtins.dylib + +Linux + +libnvrtc.so, libnvrtc-builtins.so + +Component + +NVIDIA Optimizing Compiler Library + +Windows + +nvvm.dll + +Mac OSX + +libnvvm.dylib + +Linux + +libnvvm.so + +Component + +NVIDIA Common Device Math Functions Library + +Windows + +libdevice.10.bc + +Mac OSX + +libdevice.10.bc + +Linux + +libdevice.10.bc + +Component + +CUDA Occupancy Calculation Header Library + +All + +cuda_occupancy.h + +Component + +CUDA Half Precision Headers + +All + +cuda_fp16.h, cuda_fp16.hpp + +Component + +CUDA Profiling Tools Interface (CUPTI) Library + +Windows + +cupti.dll + +Mac OSX + +libcupti.dylib + +Linux + +libcupti.so + +Component + +NVIDIA Tools Extension Library + +Windows + +nvToolsExt.dll, nvToolsExt.lib + +Mac OSX + +libnvToolsExt.dylib + +Linux + +libnvToolsExt.so + +Component + +NVIDIA CUDA Driver Libraries + +Linux + +libcuda.so, libnvidia-fatbinaryloader.so, +libnvidia-ptxjitcompiler.so + +The NVIDIA CUDA Driver Libraries are only distributable in +applications that meet this criteria: + + 1. The application was developed starting from a NVIDIA CUDA + container obtained from Docker Hub or the NVIDIA GPU + Cloud, and + + 2. The resulting application is packaged as a Docker + container and distributed to users on Docker Hub or the + NVIDIA GPU Cloud only. + + +2.7. Attachment B + + +Additional Licensing Obligations + +The following third party components included in the SOFTWARE +are licensed to Licensee pursuant to the following terms and +conditions: + + 1. Licensee's use of the GDB third party component is + subject to the terms and conditions of GNU GPL v3: + + This product includes copyrighted third-party software licensed + under the terms of the GNU General Public License v3 ("GPL v3"). + All third-party software packages are copyright by their respective + authors. GPL v3 terms and conditions are hereby incorporated into + the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt + + Consistent with these licensing requirements, the software + listed below is provided under the terms of the specified + open source software licenses. To obtain source code for + software provided under licenses that require + redistribution of source code, including the GNU General + Public License (GPL) and GNU Lesser General Public License + (LGPL), contact oss-requests@nvidia.com. This offer is + valid for a period of three (3) years from the date of the + distribution of this product by NVIDIA CORPORATION. + + Component License + CUDA-GDB GPL v3 + + 2. Licensee represents and warrants that any and all third + party licensing and/or royalty payment obligations in + connection with Licensee's use of the H.264 video codecs + are solely the responsibility of Licensee. + + 3. Licensee's use of the Thrust library is subject to the + terms and conditions of the Apache License Version 2.0. + All third-party software packages are copyright by their + respective authors. Apache License Version 2.0 terms and + conditions are hereby incorporated into the Agreement by + this reference. 
+ http://www.apache.org/licenses/LICENSE-2.0.html + + In addition, Licensee acknowledges the following notice: + Thrust includes source code from the Boost Iterator, + Tuple, System, and Random Number libraries. + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 4. Licensee's use of the LLVM third party component is + subject to the following terms and conditions: + + ====================================================== + LLVM Release License + ====================================================== + University of Illinois/NCSA + Open Source License + + Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign. + All rights reserved. + + Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal with the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at Urbana- + Champaign, nor the names of its contributors may be used to endorse or + promote products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS WITH THE SOFTWARE. + + 5. Licensee's use (e.g. nvprof) of the PCRE third party + component is subject to the following terms and + conditions: + + ------------ + PCRE LICENCE + ------------ + PCRE is a library of functions to support regular expressions whose syntax + and semantics are as close as possible to those of the Perl 5 language. + Release 8 of PCRE is distributed under the terms of the "BSD" licence, as + specified below. The documentation for PCRE, supplied in the "doc" + directory, is distributed under the same terms as the software itself. The + basic library functions are written in C and are freestanding. Also + included in the distribution is a set of C++ wrapper functions, and a just- + in-time compiler that can be used to optimize pattern matching. These are + both optional features that can be omitted when the library is built. + + THE BASIC LIBRARY FUNCTIONS + --------------------------- + Written by: Philip Hazel + Email local part: ph10 + Email domain: cam.ac.uk + University of Cambridge Computing Service, + Cambridge, England. + Copyright (c) 1997-2012 University of Cambridge + All rights reserved. + + PCRE JUST-IN-TIME COMPILATION SUPPORT + ------------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2010-2012 Zoltan Herczeg + All rights reserved. + + STACK-LESS JUST-IN-TIME COMPILER + -------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2009-2012 Zoltan Herczeg + All rights reserved. + + THE C++ WRAPPER FUNCTIONS + ------------------------- + Contributed by: Google Inc. + Copyright (c) 2007-2012, Google Inc. + All rights reserved. + + THE "BSD" LICENCE + ----------------- + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of the University of Cambridge nor the name of Google + Inc. nor the names of their contributors may be used to endorse or + promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 6. 
Some of the cuBLAS library routines were written by or + derived from code written by Vasily Volkov and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2007-2009, Regents of the University of California + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the University of California, Berkeley nor + the names of its contributors may be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 7. Some of the cuBLAS library routines were written by or + derived from code written by Davide Barbieri and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 8. 
Some of the cuBLAS library routines were derived from + code developed by the University of Tennessee and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2010 The University of Tennessee. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer listed in this license in the documentation and/or + other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 9. Some of the cuBLAS library routines were written by or + derived from code written by Jonathan Hogg and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2012, The Science and Technology Facilities Council (STFC). + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the STFC nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 10. 
Some of the cuBLAS library routines were written by or + derived from code written by Ahmad M. Abdelfattah, David + Keyes, and Hatem Ltaief, and are subject to the Apache + License, Version 2.0, as follows: + + -- (C) Copyright 2013 King Abdullah University of Science and Technology + Authors: + Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa) + David Keyes (david.keyes@kaust.edu.sa) + Hatem Ltaief (hatem.ltaief@kaust.edu.sa) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the King Abdullah University of Science and + Technology nor the names of its contributors may be used to endorse + or promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE + + 11. Some of the cuSPARSE library routines were written by or + derived from code written by Li-Wen Chang and are subject + to the NCSA Open Source License as follows: + + Copyright (c) 2012, University of Illinois. + + All rights reserved. + + Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal with the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimers in the documentation and/or other materials provided + with the distribution. + * Neither the names of IMPACT Group, University of Illinois, nor + the names of its contributors may be used to endorse or promote + products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE + SOFTWARE. + + 12. Some of the cuRAND library routines were written by or + derived from code written by Mutsuo Saito and Makoto + Matsumoto and are subject to the following license: + + Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima + University. All rights reserved. + + Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima + University and University of Tokyo. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the Hiroshima University nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 13. Some of the cuRAND library routines were derived from + code developed by D. E. Shaw Research and are subject to + the following license: + + Copyright 2010-2011, D. E. Shaw Research. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions, and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions, and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of D. E. Shaw Research nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 14. Some of the Math library routines were written by or + derived from code developed by Norbert Juffa and are + subject to the following license: + + Copyright (c) 2015-2017, Norbert Juffa + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 15. Licensee's use of the lz4 third party component is + subject to the following terms and conditions: + + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 16. 
The NPP library uses code from the Boost Math Toolkit, + and is subject to the following license: + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 17. Portions of the Nsight Eclipse Edition is subject to the + following license: + + The Eclipse Foundation makes available all content in this plug-in + ("Content"). Unless otherwise indicated below, the Content is provided + to you under the terms and conditions of the Eclipse Public License + Version 1.0 ("EPL"). A copy of the EPL is available at http:// + www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program" + will mean the Content. + + If you did not receive this Content directly from the Eclipse + Foundation, the Content is being redistributed by another party + ("Redistributor") and different terms and conditions may apply to your + use of any object code in the Content. Check the Redistributor's + license that was provided with the Content. If no such license exists, + contact the Redistributor. Unless otherwise indicated below, the terms + and conditions of the EPL still apply to any source code in the + Content and such source code may be obtained at http://www.eclipse.org. + + 18. Some of the cuBLAS library routines uses code from + OpenAI, which is subject to the following license: + + License URL + https://github.com/openai/openai-gemm/blob/master/LICENSE + + License Text + The MIT License + + Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + 19. Licensee's use of the Visual Studio Setup Configuration + Samples is subject to the following license: + + The MIT License (MIT) + Copyright (C) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of the Software, + and to permit persons to whom the Software is furnished to do so, + subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + 20. Licensee's use of linmath.h header for CPU functions for + GL vector/matrix operations from lunarG is subject to the + Apache License Version 2.0. + + 21. The DX12-CUDA sample uses the d3dx12.h header, which is + subject to the MIT license . 
+ +----------------- diff --git a/wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/RECORD b/wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..73aee59a1b3ca34948a66e8312c1895a71c09fe6 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/RECORD @@ -0,0 +1,23 @@ +nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusolver/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusolver/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusolver/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusolver/include/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusolver/include/cusolverDn.h,sha256=8KUcqUxWPr8jpz3ZVpTB6I3IXMme1ok7E7vi9XXKRzk,147406 +nvidia/cusolver/include/cusolverMg.h,sha256=N8989nnS2BleeMyuftbQgBDJ4sMAkLPSnmy_S_7fxng,11549 +nvidia/cusolver/include/cusolverRf.h,sha256=7BZfWeuMJ8w1Pz4iZeGmwvDZbDNNq0ivG5MHtiATtls,14292 +nvidia/cusolver/include/cusolverSp.h,sha256=8fev0XawDBd0xrOxUlQ3WhclKlUuVAT64zKxwnP8iT0,32561 +nvidia/cusolver/include/cusolverSp_LOWLEVEL_PREVIEW.h,sha256=rTuS0rxwGV3bAz50ua59WVPQ9SvlijORj732oPejoCk,37495 +nvidia/cusolver/include/cusolver_common.h,sha256=oyltrdGL5cpIPe3oJWxQ95XEprTPAohOG8XHBB84hRM,8824 +nvidia/cusolver/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusolver/lib/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusolver/lib/libcusolver.so.11,sha256=6AWRIxTk0qxMYVazEbN11wRgK7_Mcz1OkxS6FGQ6bd4,234922936 +nvidia/cusolver/lib/libcusolverMg.so.11,sha256=-fxKTTDSdUr_N679R85-NfpI0GDLO2IoTmUZm4utEeE,141988264 +nvidia_cusolver_cu11-11.4.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +nvidia_cusolver_cu11-11.4.0.1.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262 +nvidia_cusolver_cu11-11.4.0.1.dist-info/METADATA,sha256=orEmzZBFkVhXyBgbnGKGbaI0ClyUFfTUhuuG_djbkqY,1551 +nvidia_cusolver_cu11-11.4.0.1.dist-info/RECORD,, +nvidia_cusolver_cu11-11.4.0.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia_cusolver_cu11-11.4.0.1.dist-info/WHEEL,sha256=v6cGNql5q3Lw8M9MsG2Kk4-SoHxxNwGgZHlg0h0twcI,115 +nvidia_cusolver_cu11-11.4.0.1.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7 diff --git a/wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/REQUESTED b/wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/top_level.txt b/wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..862f7abf232cdfbb928609856247292e81c9decb --- /dev/null +++ b/wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/top_level.txt @@ -0,0 +1 @@ +nvidia diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/aggregation.py b/wemm/lib/python3.10/site-packages/torchmetrics/aggregation.py new file mode 100644 index 0000000000000000000000000000000000000000..a151d06da5f53726c0b1852aa807dedde53980a7 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/aggregation.py @@ -0,0 +1,408 @@ +# Copyright The 
PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import warnings
+from typing import Any, Callable, List, Union
+
+import torch
+from torch import Tensor
+
+from torchmetrics.metric import Metric
+from torchmetrics.utilities.data import dim_zero_cat
+
+
+class BaseAggregator(Metric):
+    """Base class for aggregation metrics.
+
+    Args:
+        fn: string specifying the reduction function
+        default_value: default tensor value to use for the metric state
+        nan_strategy: options:
+            - ``'error'``: if any `nan` values are encountered, a RuntimeError will be raised
+            - ``'warn'``: if any `nan` values are encountered, a warning will be issued and they will be removed
+            - ``'ignore'``: all `nan` values are silently removed
+            - a float: if a float is provided, any `nan` values will be imputed with this value
+
+        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
+    """
+
+    value: Tensor
+    is_differentiable = None
+    higher_is_better = None
+    full_state_update = False
+
+    def __init__(
+        self,
+        fn: Union[Callable, str],
+        default_value: Union[Tensor, List],
+        nan_strategy: Union[str, float] = "error",
+        **kwargs: Any,
+    ):
+        super().__init__(**kwargs)
+        allowed_nan_strategy = ("error", "warn", "ignore")
+        if nan_strategy not in allowed_nan_strategy and not isinstance(nan_strategy, float):
+            raise ValueError(
+                f"Arg `nan_strategy` should either be a float or one of {allowed_nan_strategy}"
+                f" but got {nan_strategy}."
+            )
+
+        self.nan_strategy = nan_strategy
+        self.add_state("value", default=default_value, dist_reduce_fx=fn)
+
+    def _cast_and_nan_check_input(self, x: Union[float, Tensor]) -> Tensor:
+        """Convert the input ``x`` to a tensor if it is not one already, then handle any ``nan`` values
+        according to ``nan_strategy``: raise an error, warn and remove, silently remove, or impute."""
+        if not isinstance(x, Tensor):
+            x = torch.as_tensor(x, dtype=torch.float32, device=self.device)
+
+        nans = torch.isnan(x)
+        if nans.any():
+            if self.nan_strategy == "error":
+                raise RuntimeError("Encountered `nan` values in tensor")
+            if self.nan_strategy == "warn":
+                warnings.warn("Encountered `nan` values in tensor. Will be removed.", UserWarning)
+                x = x[~nans]
+            elif self.nan_strategy == "ignore":
+                x = x[~nans]
+            else:
+                x[nans] = self.nan_strategy
+
+        return x.float()
+
+    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
+        """Overridden in child classes to accumulate ``value`` into the metric state."""
+        pass
+
+    def compute(self) -> Tensor:
+        """Compute the aggregated value."""
+        return self.value
+
+
+class MaxMetric(BaseAggregator):
+    """Aggregate a stream of values into their maximum value.
+
+    As input to ``forward`` and ``update`` the metric accepts the following input
+
+    - ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
+      arbitrary shape ``(...,)``.
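+
+    A short sketch of the inherited ``nan_strategy`` handling (illustrative; with
+    ``'ignore'`` the `nan` entries are silently dropped before the running maximum
+    is updated):
+
+    >>> import torch
+    >>> from torchmetrics import MaxMetric
+    >>> metric = MaxMetric(nan_strategy="ignore")
+    >>> metric.update(torch.tensor([1.0, float("nan"), 5.0]))
+    >>> metric.compute()
+    tensor(5.)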
+
+    As output of `forward` and `compute` the metric returns the following output
+
+    - ``agg`` (:class:`~torch.Tensor`): scalar float tensor with the aggregated maximum value over all inputs received
+
+    Args:
+        nan_strategy: options:
+            - ``'error'``: if any `nan` values are encountered, a RuntimeError will be raised
+            - ``'warn'``: if any `nan` values are encountered, a warning will be issued and they will be removed
+            - ``'ignore'``: all `nan` values are silently removed
+            - a float: if a float is provided, any `nan` values will be imputed with this value
+
+        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
+
+    Example:
+        >>> import torch
+        >>> from torchmetrics import MaxMetric
+        >>> metric = MaxMetric()
+        >>> metric.update(1)
+        >>> metric.update(torch.tensor([2, 3]))
+        >>> metric.compute()
+        tensor(3.)
+    """
+
+    full_state_update = True
+
+    def __init__(
+        self,
+        nan_strategy: Union[str, float] = "warn",
+        **kwargs: Any,
+    ):
+        super().__init__(
+            "max",
+            -torch.tensor(float("inf")),
+            nan_strategy,
+            **kwargs,
+        )
+
+    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
+        """Update state with data.
+
+        Args:
+            value: Either a float or tensor containing data. Additional tensor
+                dimensions will be flattened
+        """
+        value = self._cast_and_nan_check_input(value)
+        if value.numel():  # make sure tensor not empty
+            self.value = torch.max(self.value, torch.max(value))
+
+
+class MinMetric(BaseAggregator):
+    """Aggregate a stream of values into their minimum value.
+
+    As input to ``forward`` and ``update`` the metric accepts the following input
+
+    - ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
+      arbitrary shape ``(...,)``.
+
+    As output of `forward` and `compute` the metric returns the following output
+
+    - ``agg`` (:class:`~torch.Tensor`): scalar float tensor with the aggregated minimum value over all inputs received
+
+    Args:
+        nan_strategy: options:
+            - ``'error'``: if any `nan` values are encountered, a RuntimeError will be raised
+            - ``'warn'``: if any `nan` values are encountered, a warning will be issued and they will be removed
+            - ``'ignore'``: all `nan` values are silently removed
+            - a float: if a float is provided, any `nan` values will be imputed with this value
+
+        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
+
+    Example:
+        >>> import torch
+        >>> from torchmetrics import MinMetric
+        >>> metric = MinMetric()
+        >>> metric.update(1)
+        >>> metric.update(torch.tensor([2, 3]))
+        >>> metric.compute()
+        tensor(1.)
+    """
+
+    full_state_update = True
+
+    def __init__(
+        self,
+        nan_strategy: Union[str, float] = "warn",
+        **kwargs: Any,
+    ):
+        super().__init__(
+            "min",
+            torch.tensor(float("inf")),
+            nan_strategy,
+            **kwargs,
+        )
+
+    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
+        """Update state with data.
+
+        Args:
+            value: Either a float or tensor containing data. Additional tensor
+                dimensions will be flattened
+        """
+        value = self._cast_and_nan_check_input(value)
+        if value.numel():  # make sure tensor not empty
+            self.value = torch.min(self.value, torch.min(value))
+
+
+class SumMetric(BaseAggregator):
+    """Aggregate a stream of values into their sum.
+
+    As input to ``forward`` and ``update`` the metric accepts the following input
+
+    - ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
+      arbitrary shape ``(...,)``.
+
+    As output of `forward` and `compute` the metric returns the following output
+
+    - ``agg`` (:class:`~torch.Tensor`): scalar float tensor with the aggregated sum over all inputs received
+
+    Args:
+        nan_strategy: options:
+            - ``'error'``: if any `nan` values are encountered, a RuntimeError will be raised
+            - ``'warn'``: if any `nan` values are encountered, a warning will be issued and they will be removed
+            - ``'ignore'``: all `nan` values are silently removed
+            - a float: if a float is provided, any `nan` values will be imputed with this value
+
+        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
+
+    Example:
+        >>> import torch
+        >>> from torchmetrics import SumMetric
+        >>> metric = SumMetric()
+        >>> metric.update(1)
+        >>> metric.update(torch.tensor([2, 3]))
+        >>> metric.compute()
+        tensor(6.)
+    """
+
+    def __init__(
+        self,
+        nan_strategy: Union[str, float] = "warn",
+        **kwargs: Any,
+    ):
+        super().__init__(
+            "sum",
+            torch.tensor(0.0),
+            nan_strategy,
+            **kwargs,
+        )
+
+    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
+        """Update state with data.
+
+        Args:
+            value: Either a float or tensor containing data. Additional tensor
+                dimensions will be flattened
+        """
+        value = self._cast_and_nan_check_input(value)
+        if value.numel():
+            self.value += value.sum()
+
+
+class CatMetric(BaseAggregator):
+    """Concatenate a stream of values.
+
+    As input to ``forward`` and ``update`` the metric accepts the following input
+
+    - ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
+      arbitrary shape ``(...,)``.
+
+    As output of `forward` and `compute` the metric returns the following output
+
+    - ``agg`` (:class:`~torch.Tensor`): a tensor with the concatenated values over all inputs received
+
+    Args:
+        nan_strategy: options:
+            - ``'error'``: if any `nan` values are encountered, a RuntimeError will be raised
+            - ``'warn'``: if any `nan` values are encountered, a warning will be issued and they will be removed
+            - ``'ignore'``: all `nan` values are silently removed
+            - a float: if a float is provided, any `nan` values will be imputed with this value
+
+        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
+
+    Example:
+        >>> import torch
+        >>> from torchmetrics import CatMetric
+        >>> metric = CatMetric()
+        >>> metric.update(1)
+        >>> metric.update(torch.tensor([2, 3]))
+        >>> metric.compute()
+        tensor([1., 2., 3.])
+    """
+
+    def __init__(
+        self,
+        nan_strategy: Union[str, float] = "warn",
+        **kwargs: Any,
+    ):
+        super().__init__("cat", [], nan_strategy, **kwargs)
+
+    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
+        """Update state with data.
+
+        Args:
+            value: Either a float or tensor containing data.
Additional tensor
+                dimensions will be flattened
+        """
+        value = self._cast_and_nan_check_input(value)
+        if value.numel():
+            self.value.append(value)
+
+    def compute(self) -> Tensor:
+        """Compute the aggregated value."""
+        if isinstance(self.value, list) and self.value:
+            return dim_zero_cat(self.value)
+        return self.value
+
+
+class MeanMetric(BaseAggregator):
+    """Aggregate a stream of values into their mean value.
+
+    As input to ``forward`` and ``update`` the metric accepts the following input
+
+    - ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
+      arbitrary shape ``(...,)``.
+    - ``weight`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
+      arbitrary shape ``(...,)``. Needs to be broadcastable with the shape of the ``value`` tensor.
+
+    As output of `forward` and `compute` the metric returns the following output
+
+    - ``agg`` (:class:`~torch.Tensor`): scalar float tensor with the aggregated (weighted) mean over all inputs received
+
+    Args:
+        nan_strategy: options:
+            - ``'error'``: if any `nan` values are encountered, a RuntimeError will be raised
+            - ``'warn'``: if any `nan` values are encountered, a warning will be issued and they will be removed
+            - ``'ignore'``: all `nan` values are silently removed
+            - a float: if a float is provided, any `nan` values will be imputed with this value
+
+        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
+
+    Example:
+        >>> from torchmetrics import MeanMetric
+        >>> metric = MeanMetric()
+        >>> metric.update(1)
+        >>> metric.update(torch.tensor([2, 3]))
+        >>> metric.compute()
+        tensor(2.)
+    """
+
+    def __init__(
+        self,
+        nan_strategy: Union[str, float] = "warn",
+        **kwargs: Any,
+    ):
+        super().__init__(
+            "sum",
+            torch.tensor(0.0),
+            nan_strategy,
+            **kwargs,
+        )
+        self.add_state("weight", default=torch.tensor(0.0), dist_reduce_fx="sum")
+
+    def update(self, value: Union[float, Tensor], weight: Union[float, Tensor] = 1.0) -> None:  # type: ignore
+        """Update state with data.
+
+        Args:
+            value: Either a float or tensor containing data. Additional tensor
+                dimensions will be flattened
+            weight: Either a float or tensor containing weights for calculating
+                the average. The shape of ``weight`` has to be broadcastable with
+                the shape of ``value``. Defaults to ``1.0``, which corresponds to
+                a simple (unweighted) average.
+        """
+        value = self._cast_and_nan_check_input(value)
+        weight = self._cast_and_nan_check_input(weight)
+
+        if value.numel() == 0:
+            return
+        # broadcast weight to value shape
+        weight = torch.broadcast_to(weight, value.shape)
+        self.value += (value * weight).sum()
+        self.weight += weight.sum()
+
+    def compute(self) -> Tensor:
+        """Compute the aggregated value."""
+        return self.value / self.weight
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/classification/__init__.py b/wemm/lib/python3.10/site-packages/torchmetrics/classification/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..60069403e8a4bd02db1efea0be3d2a0ad14b2185
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/classification/__init__.py
@@ -0,0 +1,191 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from torchmetrics.classification.confusion_matrix import ( # isort:skip + BinaryConfusionMatrix, + ConfusionMatrix, + MulticlassConfusionMatrix, + MultilabelConfusionMatrix, +) +from torchmetrics.classification.precision_recall_curve import ( # isort:skip + PrecisionRecallCurve, + BinaryPrecisionRecallCurve, + MulticlassPrecisionRecallCurve, + MultilabelPrecisionRecallCurve, +) +from torchmetrics.classification.stat_scores import ( # isort:skip + BinaryStatScores, + MulticlassStatScores, + MultilabelStatScores, + StatScores, +) +from torchmetrics.classification.accuracy import Accuracy, BinaryAccuracy, MulticlassAccuracy, MultilabelAccuracy +from torchmetrics.classification.auroc import AUROC, BinaryAUROC, MulticlassAUROC, MultilabelAUROC +from torchmetrics.classification.average_precision import ( + AveragePrecision, + BinaryAveragePrecision, + MulticlassAveragePrecision, + MultilabelAveragePrecision, +) +from torchmetrics.classification.calibration_error import ( + BinaryCalibrationError, + CalibrationError, + MulticlassCalibrationError, +) +from torchmetrics.classification.cohen_kappa import BinaryCohenKappa, CohenKappa, MulticlassCohenKappa +from torchmetrics.classification.dice import Dice +from torchmetrics.classification.exact_match import ExactMatch, MulticlassExactMatch, MultilabelExactMatch +from torchmetrics.classification.f_beta import ( + BinaryF1Score, + BinaryFBetaScore, + F1Score, + FBetaScore, + MulticlassF1Score, + MulticlassFBetaScore, + MultilabelF1Score, + MultilabelFBetaScore, +) +from torchmetrics.classification.hamming import ( + BinaryHammingDistance, + HammingDistance, + MulticlassHammingDistance, + MultilabelHammingDistance, +) +from torchmetrics.classification.hinge import BinaryHingeLoss, HingeLoss, MulticlassHingeLoss +from torchmetrics.classification.jaccard import ( + BinaryJaccardIndex, + JaccardIndex, + MulticlassJaccardIndex, + MultilabelJaccardIndex, +) +from torchmetrics.classification.matthews_corrcoef import ( + BinaryMatthewsCorrCoef, + MatthewsCorrCoef, + MulticlassMatthewsCorrCoef, + MultilabelMatthewsCorrCoef, +) +from torchmetrics.classification.precision_recall import ( + BinaryPrecision, + BinaryRecall, + MulticlassPrecision, + MulticlassRecall, + MultilabelPrecision, + MultilabelRecall, + Precision, + Recall, +) +from torchmetrics.classification.ranking import ( + MultilabelCoverageError, + MultilabelRankingAveragePrecision, + MultilabelRankingLoss, +) +from torchmetrics.classification.recall_at_fixed_precision import ( + BinaryRecallAtFixedPrecision, + MulticlassRecallAtFixedPrecision, + MultilabelRecallAtFixedPrecision, +) +from torchmetrics.classification.roc import ROC, BinaryROC, MulticlassROC, MultilabelROC +from torchmetrics.classification.specificity import ( + BinarySpecificity, + MulticlassSpecificity, + MultilabelSpecificity, + Specificity, +) + +__all__ = [ + "BinaryConfusionMatrix", + "ConfusionMatrix", + "MulticlassConfusionMatrix", + "MultilabelConfusionMatrix", + "PrecisionRecallCurve", + "BinaryPrecisionRecallCurve", + "MulticlassPrecisionRecallCurve", + "MultilabelPrecisionRecallCurve", + "BinaryStatScores", + 
"MulticlassStatScores", + "MultilabelStatScores", + "StatScores", + "Accuracy", + "BinaryAccuracy", + "MulticlassAccuracy", + "MultilabelAccuracy", + "AUROC", + "BinaryAUROC", + "MulticlassAUROC", + "MultilabelAUROC", + "AveragePrecision", + "BinaryAveragePrecision", + "MulticlassAveragePrecision", + "MultilabelAveragePrecision", + "BinnedAveragePrecision", + "BinnedPrecisionRecallCurve", + "BinnedRecallAtFixedPrecision", + "BinaryCalibrationError", + "CalibrationError", + "MulticlassCalibrationError", + "BinaryCohenKappa", + "CohenKappa", + "MulticlassCohenKappa", + "Dice", + "ExactMatch", + "MulticlassExactMatch", + "MultilabelExactMatch", + "BinaryF1Score", + "BinaryFBetaScore", + "F1Score", + "FBetaScore", + "MulticlassF1Score", + "MulticlassFBetaScore", + "MultilabelF1Score", + "MultilabelFBetaScore", + "BinaryHammingDistance", + "HammingDistance", + "MulticlassHammingDistance", + "MultilabelHammingDistance", + "BinaryHingeLoss", + "HingeLoss", + "MulticlassHingeLoss", + "BinaryJaccardIndex", + "JaccardIndex", + "MulticlassJaccardIndex", + "MultilabelJaccardIndex", + "BinaryMatthewsCorrCoef", + "MatthewsCorrCoef", + "MulticlassMatthewsCorrCoef", + "MultilabelMatthewsCorrCoef", + "BinaryPrecision", + "BinaryRecall", + "MulticlassPrecision", + "MulticlassRecall", + "MultilabelPrecision", + "MultilabelRecall", + "Precision", + "Recall", + "CoverageError", + "LabelRankingAveragePrecision", + "LabelRankingLoss", + "MultilabelCoverageError", + "MultilabelRankingAveragePrecision", + "MultilabelRankingLoss", + "BinaryRecallAtFixedPrecision", + "MulticlassRecallAtFixedPrecision", + "MultilabelRecallAtFixedPrecision", + "ROC", + "BinaryROC", + "MulticlassROC", + "MultilabelROC", + "BinarySpecificity", + "MulticlassSpecificity", + "MultilabelSpecificity", + "Specificity", +] diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/classification/auroc.py b/wemm/lib/python3.10/site-packages/torchmetrics/classification/auroc.py new file mode 100644 index 0000000000000000000000000000000000000000..dacec440d08a7644ea10681df63e9564daf8b242 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/classification/auroc.py @@ -0,0 +1,372 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import Any, List, Optional, Union + +import torch +from torch import Tensor +from typing_extensions import Literal + +from torchmetrics.classification.precision_recall_curve import ( + BinaryPrecisionRecallCurve, + MulticlassPrecisionRecallCurve, + MultilabelPrecisionRecallCurve, +) +from torchmetrics.functional.classification.auroc import ( + _binary_auroc_arg_validation, + _binary_auroc_compute, + _multiclass_auroc_arg_validation, + _multiclass_auroc_compute, + _multilabel_auroc_arg_validation, + _multilabel_auroc_compute, +) +from torchmetrics.metric import Metric +from torchmetrics.utilities.data import dim_zero_cat + + +class BinaryAUROC(BinaryPrecisionRecallCurve): + r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for binary tasks. The AUROC + score summarizes the ROC curve into an single number that describes the performance of a model for multiple + thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5 + corresponds to random guessing. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)`` containing probabilities or logits for + each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply + sigmoid per element. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and + therefore only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the + positive class. + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``b_auroc`` (:class:`~torch.Tensor`): A single scalar with the auroc score. + + Additional dimension ``...`` will be flattened into the batch dimension. + + The implementation both supports calculating the metric in a non-binned but accurate version and a + binned version that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will + activate the non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the + `thresholds` argument to either an integer, list or a 1d tensor will use a binned version that uses memory of + size :math:`\mathcal{O}(n_{thresholds})` (constant memory). + + Args: + max_fpr: If not ``None``, calculates standardized partial AUC over the range ``[0, max_fpr]``. + thresholds: + Can be one of: + + - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from + all the data. Most accurate but also most memory consuming approach. + - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from + 0 to 1 as bins for the calculation. + - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation + - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as + bins for the calculation. + + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. 
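+
+    A brief sketch of the ``max_fpr`` option (illustrative; the value assumes the
+    McClish-standardized partial AUC over ``[0, max_fpr]``, under which 0.5 still
+    corresponds to random guessing on the restricted range):
+
+    >>> import torch
+    >>> from torchmetrics.classification import BinaryAUROC
+    >>> preds = torch.tensor([0.1, 0.4, 0.35, 0.8])
+    >>> target = torch.tensor([0, 0, 1, 1])
+    >>> BinaryAUROC(max_fpr=0.5)(preds, target)
+    tensor(0.6667)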
+ + Example: + >>> from torchmetrics.classification import BinaryAUROC + >>> preds = torch.tensor([0, 0.5, 0.7, 0.8]) + >>> target = torch.tensor([0, 1, 1, 0]) + >>> metric = BinaryAUROC(thresholds=None) + >>> metric(preds, target) + tensor(0.5000) + >>> b_auroc = BinaryAUROC(thresholds=5) + >>> b_auroc(preds, target) + tensor(0.5000) + """ + is_differentiable: bool = False + higher_is_better: Optional[bool] = None + full_state_update: bool = False + + def __init__( + self, + max_fpr: Optional[float] = None, + thresholds: Optional[Union[int, List[float], Tensor]] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> None: + super().__init__(thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs) + if validate_args: + _binary_auroc_arg_validation(max_fpr, thresholds, ignore_index) + self.max_fpr = max_fpr + + def compute(self) -> Tensor: + if self.thresholds is None: + state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)] + else: + state = self.confmat + return _binary_auroc_compute(state, self.thresholds, self.max_fpr) + + +class MulticlassAUROC(MulticlassPrecisionRecallCurve): + r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for multiclass tasks. The AUROC + score summarizes the ROC curve into an single number that describes the performance of a model for multiple + thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5 + corresponds to random guessing. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)`` containing probabilities or logits + for each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto + apply softmax per sample. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and + therefore only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified). + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``mc_auroc`` (:class:`~torch.Tensor`): If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will + be returned with auroc score per class. If `average="macro"|"weighted"` then a single scalar is returned. + + Additional dimension ``...`` will be flattened into the batch dimension. + + The implementation both supports calculating the metric in a non-binned but accurate version and a binned version + that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the + non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds` + argument to either an integer, list or a 1d tensor will use a binned version that uses memory of + size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory). + + Args: + num_classes: Integer specifing the number of classes + average: + Defines the reduction that is applied over classes. 
Should be one of the following: + + - ``macro``: Calculate score for each class and average them + - ``weighted``: Calculates score for each class and computes weighted average using their support + - ``"none"`` or ``None``: Calculates score for each class and applies no reduction + + thresholds: + Can be one of: + + - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from + all the data. Most accurate but also most memory consuming approach. + - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from + 0 to 1 as bins for the calculation. + - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation + - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as + bins for the calculation. + + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. + + Example: + >>> from torchmetrics.classification import MulticlassAUROC + >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.05, 0.75, 0.05]]) + >>> target = torch.tensor([0, 1, 3, 2]) + >>> metric = MulticlassAUROC(num_classes=5, average="macro", thresholds=None) + >>> metric(preds, target) + tensor(0.5333) + >>> mc_auroc = MulticlassAUROC(num_classes=5, average=None, thresholds=None) + >>> mc_auroc(preds, target) + tensor([1.0000, 1.0000, 0.3333, 0.3333, 0.0000]) + >>> mc_auroc = MulticlassAUROC(num_classes=5, average="macro", thresholds=5) + >>> mc_auroc(preds, target) + tensor(0.5333) + >>> mc_auroc = MulticlassAUROC(num_classes=5, average=None, thresholds=5) + >>> mc_auroc(preds, target) + tensor([1.0000, 1.0000, 0.3333, 0.3333, 0.0000]) + """ + + is_differentiable: bool = False + higher_is_better: Optional[bool] = None + full_state_update: bool = False + + def __init__( + self, + num_classes: int, + average: Optional[Literal["macro", "weighted", "none"]] = "macro", + thresholds: Optional[Union[int, List[float], Tensor]] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> None: + super().__init__( + num_classes=num_classes, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs + ) + if validate_args: + _multiclass_auroc_arg_validation(num_classes, average, thresholds, ignore_index) + self.average = average + self.validate_args = validate_args + + def compute(self) -> Tensor: + if self.thresholds is None: + state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)] + else: + state = self.confmat + return _multiclass_auroc_compute(state, self.num_classes, self.average, self.thresholds) + + +class MultilabelAUROC(MultilabelPrecisionRecallCurve): + r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for multilabel tasks. The AUROC + score summarizes the ROC curve into an single number that describes the performance of a model for multiple + thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5 + corresponds to random guessing. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)`` containing probabilities or logits + for each observation. 
If preds has values outside [0,1] range we consider the input to be logits and will auto + apply sigmoid per element. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)`` containing ground truth labels, and + therefore only contain {0,1} values (except if `ignore_index` is specified). + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``ml_auroc`` (:class:`~torch.Tensor`): If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will + be returned with auroc score per class. If `average="micro|macro"|"weighted"` then a single scalar is returned. + + Additional dimension ``...`` will be flattened into the batch dimension. + + The implementation both supports calculating the metric in a non-binned but accurate version and a binned version + that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the + non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds` + argument to either an integer, list or a 1d tensor will use a binned version that uses memory of + size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory). + + Args: + num_labels: Integer specifing the number of labels + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum score over all labels + - ``macro``: Calculate score for each label and average them + - ``weighted``: Calculates score for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates score for each label and applies no reduction + thresholds: + Can be one of: + + - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from + all the data. Most accurate but also most memory consuming approach. + - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from + 0 to 1 as bins for the calculation. + - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation + - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as + bins for the calculation. + + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. + + Example: + >>> from torchmetrics.classification import MultilabelAUROC + >>> preds = torch.tensor([[0.75, 0.05, 0.35], + ... [0.45, 0.75, 0.05], + ... [0.05, 0.55, 0.75], + ... [0.05, 0.65, 0.05]]) + >>> target = torch.tensor([[1, 0, 1], + ... [0, 0, 0], + ... [0, 1, 1], + ... 
[1, 1, 1]]) + >>> ml_auroc = MultilabelAUROC(num_labels=3, average="macro", thresholds=None) + >>> ml_auroc(preds, target) + tensor(0.6528) + >>> ml_auroc = MultilabelAUROC(num_labels=3, average=None, thresholds=None) + >>> ml_auroc(preds, target) + tensor([0.6250, 0.5000, 0.8333]) + >>> ml_auroc = MultilabelAUROC(num_labels=3, average="macro", thresholds=5) + >>> ml_auroc(preds, target) + tensor(0.6528) + >>> ml_auroc = MultilabelAUROC(num_labels=3, average=None, thresholds=5) + >>> ml_auroc(preds, target) + tensor([0.6250, 0.5000, 0.8333]) + """ + is_differentiable: bool = False + higher_is_better: Optional[bool] = None + full_state_update: bool = False + + def __init__( + self, + num_labels: int, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + thresholds: Optional[Union[int, List[float], Tensor]] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> None: + super().__init__( + num_labels=num_labels, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs + ) + if validate_args: + _multilabel_auroc_arg_validation(num_labels, average, thresholds, ignore_index) + self.average = average + self.validate_args = validate_args + + def compute(self) -> Tensor: + if self.thresholds is None: + state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)] + else: + state = self.confmat + return _multilabel_auroc_compute(state, self.num_labels, self.average, self.thresholds, self.ignore_index) + + +class AUROC: + r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_). The AUROC score summarizes the + ROC curve into an single number that describes the performance of a model for multiple thresholds at the same + time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5 corresponds to random guessing. + + This module is a simple wrapper to get the task specific versions of this metric, which is done by setting the + ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of + :mod:`BinaryAUROC`, :mod:`MulticlassAUROC` and :mod:`MultilabelAUROC` for the specific details of + each argument influence and examples. + + Legacy Example: + >>> preds = torch.tensor([0.13, 0.26, 0.08, 0.19, 0.34]) + >>> target = torch.tensor([0, 0, 1, 1, 1]) + >>> auroc = AUROC(task="binary") + >>> auroc(preds, target) + tensor(0.5000) + + >>> preds = torch.tensor([[0.90, 0.05, 0.05], + ... [0.05, 0.90, 0.05], + ... [0.05, 0.05, 0.90], + ... [0.85, 0.05, 0.10], + ... 
[0.10, 0.10, 0.80]]) + >>> target = torch.tensor([0, 1, 1, 2, 2]) + >>> auroc = AUROC(task="multiclass", num_classes=3) + >>> auroc(preds, target) + tensor(0.7778) + """ + + def __new__( + cls, + task: Literal["binary", "multiclass", "multilabel"], + thresholds: Optional[Union[int, List[float], Tensor]] = None, + num_classes: Optional[int] = None, + num_labels: Optional[int] = None, + average: Optional[Literal["macro", "weighted", "none"]] = "macro", + max_fpr: Optional[float] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> Metric: + kwargs.update(dict(thresholds=thresholds, ignore_index=ignore_index, validate_args=validate_args)) + if task == "binary": + return BinaryAUROC(max_fpr, **kwargs) + if task == "multiclass": + assert isinstance(num_classes, int) + return MulticlassAUROC(num_classes, average, **kwargs) + if task == "multilabel": + assert isinstance(num_labels, int) + return MultilabelAUROC(num_labels, average, **kwargs) + raise ValueError( + f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}" + ) diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/classification/average_precision.py b/wemm/lib/python3.10/site-packages/torchmetrics/classification/average_precision.py new file mode 100644 index 0000000000000000000000000000000000000000..18cdfcaa95e5fd55ad19d0c8f9442e134a70e171 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/classification/average_precision.py @@ -0,0 +1,376 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, List, Optional, Union + +import torch +from torch import Tensor +from typing_extensions import Literal + +from torchmetrics.classification.precision_recall_curve import ( + BinaryPrecisionRecallCurve, + MulticlassPrecisionRecallCurve, + MultilabelPrecisionRecallCurve, +) +from torchmetrics.functional.classification.average_precision import ( + _binary_average_precision_compute, + _multiclass_average_precision_arg_validation, + _multiclass_average_precision_compute, + _multilabel_average_precision_arg_validation, + _multilabel_average_precision_compute, +) +from torchmetrics.metric import Metric +from torchmetrics.utilities.data import dim_zero_cat + + +class BinaryAveragePrecision(BinaryPrecisionRecallCurve): + r"""Computes the average precision (AP) score for binary tasks. The AP score summarizes a precision-recall curve + as an weighted mean of precisions at each threshold, with the difference in recall from the previous threshold + as weight: + + .. math:: + AP = \sum_{n} (R_n - R_{n-1}) P_n + + where :math:`P_n, R_n` is the respective precision and recall at threshold index :math:`n`. This value is + equivalent to the area under the precision-recall curve (AUPRC). 
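+
+    For instance (a hand-worked sketch of the formula above): with recalls
+    :math:`R = (0.0, 0.5, 1.0)` and precisions :math:`P = (1.0, 1.0, 0.5)` at
+    successive thresholds, :math:`AP = (0.5 - 0.0) \cdot 1.0 + (1.0 - 0.5) \cdot 0.5 = 0.75`.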
+
+    As input to ``forward`` and ``update`` the metric accepts the following input:
+
+    - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)`` containing probabilities or logits for
+      each observation. If preds has values outside the [0,1] range, we consider the input to be logits and will auto
+      apply sigmoid per element.
+    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and
+      therefore only contains {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the
+      positive class.
+
+    As output to ``forward`` and ``compute`` the metric returns the following output:
+
+    - ``bap`` (:class:`~torch.Tensor`): A single scalar with the average precision score
+
+    Additional dimension ``...`` will be flattened into the batch dimension.
+
+    The implementation supports calculating the metric both in a non-binned but accurate version and in a binned
+    version that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate
+    the non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})`, whereas setting the `thresholds`
+    argument to either an integer, a list or a 1d tensor will use a binned version that uses memory of
+    size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
+
+    Args:
+        thresholds:
+            Can be one of:
+
+            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
+              all the data. Most accurate but also most memory-consuming approach.
+            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
+              0 to 1 as bins for the calculation.
+            - If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
+            - If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
+              bins for the calculation.
+
+        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+            Set to ``False`` for faster computations.
+        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
+
+    Example:
+        >>> from torchmetrics.classification import BinaryAveragePrecision
+        >>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
+        >>> target = torch.tensor([0, 1, 1, 0])
+        >>> metric = BinaryAveragePrecision(thresholds=None)
+        >>> metric(preds, target)
+        tensor(0.5833)
+        >>> bap = BinaryAveragePrecision(thresholds=5)
+        >>> bap(preds, target)
+        tensor(0.6667)
+    """
+    is_differentiable: bool = False
+    higher_is_better: Optional[bool] = None
+    full_state_update: bool = False
+
+    def compute(self) -> Tensor:
+        if self.thresholds is None:
+            state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)]
+        else:
+            state = self.confmat
+        return _binary_average_precision_compute(state, self.thresholds)
+
+
+class MulticlassAveragePrecision(MulticlassPrecisionRecallCurve):
+    r"""Computes the average precision (AP) score for multiclass tasks. The AP score summarizes a precision-recall
+    curve as a weighted mean of precisions at each threshold, with the difference in recall from the previous
+    threshold as weight:
+
+    .. math::
+        AP = \sum_{n} (R_n - R_{n-1}) P_n
+
+    where :math:`P_n, R_n` is the respective precision and recall at threshold index :math:`n`. This value is
+    equivalent to the area under the precision-recall curve (AUPRC).
+ + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)`` containing probabilities or logits + for each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto + apply softmax per sample. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and + therefore only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified). + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``mcap`` (:class:`~torch.Tensor`): If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will be + returned with AP score per class. If `average="macro"|"weighted"` then a single scalar is returned. + + Additional dimension ``...`` will be flattened into the batch dimension. + + The implementation both supports calculating the metric in a non-binned but accurate version and a binned version + that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the + non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds` + argument to either an integer, list or a 1d tensor will use a binned version that uses memory of + size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory). + + Args: + num_classes: Integer specifing the number of classes + average: + Defines the reduction that is applied over classes. Should be one of the following: + + - ``macro``: Calculate score for each class and average them + - ``weighted``: Calculates score for each class and computes weighted average using their support + - ``"none"`` or ``None``: Calculates score for each class and applies no reduction + thresholds: + Can be one of: + + - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from + all the data. Most accurate but also most memory consuming approach. + - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from + 0 to 1 as bins for the calculation. + - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation + - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as + bins for the calculation. + + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. + + Example: + >>> from torchmetrics.classification import MulticlassAveragePrecision + >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05, 0.05], + ... 
[0.05, 0.05, 0.05, 0.75, 0.05]])
+        >>> target = torch.tensor([0, 1, 3, 2])
+        >>> metric = MulticlassAveragePrecision(num_classes=5, average="macro", thresholds=None)
+        >>> metric(preds, target)
+        tensor(0.6250)
+        >>> mcap = MulticlassAveragePrecision(num_classes=5, average=None, thresholds=None)
+        >>> mcap(preds, target)
+        tensor([1.0000, 1.0000, 0.2500, 0.2500, nan])
+        >>> mcap = MulticlassAveragePrecision(num_classes=5, average="macro", thresholds=5)
+        >>> mcap(preds, target)
+        tensor(0.5000)
+        >>> mcap = MulticlassAveragePrecision(num_classes=5, average=None, thresholds=5)
+        >>> mcap(preds, target)
+        tensor([1.0000, 1.0000, 0.2500, 0.2500, -0.0000])
+    """
+
+    is_differentiable: bool = False
+    higher_is_better: Optional[bool] = None
+    full_state_update: bool = False
+
+    def __init__(
+        self,
+        num_classes: int,
+        average: Optional[Literal["macro", "weighted", "none"]] = "macro",
+        thresholds: Optional[Union[int, List[float], Tensor]] = None,
+        ignore_index: Optional[int] = None,
+        validate_args: bool = True,
+        **kwargs: Any,
+    ) -> None:
+        super().__init__(
+            num_classes=num_classes, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs
+        )
+        if validate_args:
+            _multiclass_average_precision_arg_validation(num_classes, average, thresholds, ignore_index)
+        self.average = average
+        self.validate_args = validate_args
+
+    def compute(self) -> Tensor:
+        if self.thresholds is None:
+            state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)]
+        else:
+            state = self.confmat
+        return _multiclass_average_precision_compute(state, self.num_classes, self.average, self.thresholds)
+
+
+class MultilabelAveragePrecision(MultilabelPrecisionRecallCurve):
+    r"""Computes the average precision (AP) score for multilabel tasks. The AP score summarizes a precision-recall
+    curve as a weighted mean of precisions at each threshold, with the difference in recall from the previous
+    threshold as weight:
+
+    .. math::
+        AP = \sum_{n} (R_n - R_{n-1}) P_n
+
+    where :math:`P_n, R_n` is the respective precision and recall at threshold index :math:`n`. This value is
+    equivalent to the area under the precision-recall curve (AUPRC).
+
+    As input to ``forward`` and ``update`` the metric accepts the following input:
+
+    - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)`` containing probabilities or logits
+      for each observation. If preds has values outside the [0,1] range, we consider the input to be logits and will
+      auto apply sigmoid per element.
+    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)`` containing ground truth labels, and
+      therefore only contains {0,1} values (except if `ignore_index` is specified).
+
+    As output to ``forward`` and ``compute`` the metric returns the following output:
+
+    - ``mlap`` (:class:`~torch.Tensor`): If `average=None|"none"` then a 1d tensor of shape (n_labels, ) will be
+      returned with the AP score per label. If `average="micro"|"macro"|"weighted"` then a single scalar is returned.
+
+    Additional dimension ``...`` will be flattened into the batch dimension.
+
+    The implementation supports calculating the metric both in a non-binned but accurate version and in a binned
+    version that is less accurate but more memory efficient.
Setting the `thresholds` argument to `None` will activate + the non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the + `thresholds` argument to either an integer, list or a 1d tensor will use a binned version that uses memory of + size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory). + + Args: + num_labels: Integer specifing the number of labels + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum score over all labels + - ``macro``: Calculate score for each label and average them + - ``weighted``: Calculates score for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates score for each label and applies no reduction + thresholds: + Can be one of: + + - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from + all the data. Most accurate but also most memory consuming approach. + - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from + 0 to 1 as bins for the calculation. + - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation + - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as + bins for the calculation. + + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. + + Example: + >>> from torchmetrics.classification import MultilabelAveragePrecision + >>> preds = torch.tensor([[0.75, 0.05, 0.35], + ... [0.45, 0.75, 0.05], + ... [0.05, 0.55, 0.75], + ... [0.05, 0.65, 0.05]]) + >>> target = torch.tensor([[1, 0, 1], + ... [0, 0, 0], + ... [0, 1, 1], + ... [1, 1, 1]]) + >>> metric = MultilabelAveragePrecision(num_labels=3, average="macro", thresholds=None) + >>> metric(preds, target) + tensor(0.7500) + >>> mlap = MultilabelAveragePrecision(num_labels=3, average=None, thresholds=None) + >>> mlap(preds, target) + tensor([0.7500, 0.5833, 0.9167]) + >>> mlap = MultilabelAveragePrecision(num_labels=3, average="macro", thresholds=5) + >>> mlap(preds, target) + tensor(0.7778) + >>> mlap = MultilabelAveragePrecision(num_labels=3, average=None, thresholds=5) + >>> mlap(preds, target) + tensor([0.7500, 0.6667, 0.9167]) + """ + is_differentiable: bool = False + higher_is_better: Optional[bool] = None + full_state_update: bool = False + + def __init__( + self, + num_labels: int, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + thresholds: Optional[Union[int, List[float], Tensor]] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> None: + super().__init__( + num_labels=num_labels, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs + ) + if validate_args: + _multilabel_average_precision_arg_validation(num_labels, average, thresholds, ignore_index) + self.average = average + self.validate_args = validate_args + + def compute(self) -> Tensor: + if self.thresholds is None: + state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)] + else: + state = self.confmat + return _multilabel_average_precision_compute( + state, self.num_labels, self.average, self.thresholds, self.ignore_index + ) + + +class AveragePrecision: + r"""Computes the average precision (AP) score. 
The AP score summarizes a precision-recall curve as a weighted + mean of precisions at each threshold, with the difference in recall from the previous threshold as weight: + + .. math:: + AP = \sum_{n} (R_n - R_{n-1}) P_n + + where :math:`P_n` and :math:`R_n` are the respective precision and recall at threshold index :math:`n`. This value is + equivalent to the area under the precision-recall curve (AUPRC). + + This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the + ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of + :mod:`BinaryAveragePrecision`, :mod:`MulticlassAveragePrecision` and :mod:`MultilabelAveragePrecision` + for the specific details of each argument's influence and examples. + + Legacy Example: + >>> pred = torch.tensor([0, 0.1, 0.8, 0.4]) + >>> target = torch.tensor([0, 1, 1, 1]) + >>> average_precision = AveragePrecision(task="binary") + >>> average_precision(pred, target) + tensor(1.) + + >>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.05, 0.75, 0.05]]) + >>> target = torch.tensor([0, 1, 3, 2]) + >>> average_precision = AveragePrecision(task="multiclass", num_classes=5, average=None) + >>> average_precision(pred, target) + tensor([1.0000, 1.0000, 0.2500, 0.2500, nan]) + """ + + def __new__( + cls, + task: Literal["binary", "multiclass", "multilabel"], + thresholds: Optional[Union[int, List[float], Tensor]] = None, + num_classes: Optional[int] = None, + num_labels: Optional[int] = None, + average: Optional[Literal["macro", "weighted", "none"]] = "macro", + ignore_index: Optional[int] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> Metric: + kwargs.update(dict(thresholds=thresholds, ignore_index=ignore_index, validate_args=validate_args)) + if task == "binary": + return BinaryAveragePrecision(**kwargs) + if task == "multiclass": + assert isinstance(num_classes, int) + return MulticlassAveragePrecision(num_classes, average, **kwargs) + if task == "multilabel": + assert isinstance(num_labels, int) + return MultilabelAveragePrecision(num_labels, average, **kwargs) + raise ValueError( + f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}" + ) diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/classification/calibration_error.py b/wemm/lib/python3.10/site-packages/torchmetrics/classification/calibration_error.py new file mode 100644 index 0000000000000000000000000000000000000000..61ec30d9217c1015a34387b7acfe1d6b1c85d408 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/classification/calibration_error.py @@ -0,0 +1,277 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+from typing import Any, Optional + +import torch +from torch import Tensor +from typing_extensions import Literal + +from torchmetrics.functional.classification.calibration_error import ( + _binary_calibration_error_arg_validation, + _binary_calibration_error_tensor_validation, + _binary_calibration_error_update, + _binary_confusion_matrix_format, + _ce_compute, + _multiclass_calibration_error_arg_validation, + _multiclass_calibration_error_tensor_validation, + _multiclass_calibration_error_update, + _multiclass_confusion_matrix_format, +) +from torchmetrics.metric import Metric +from torchmetrics.utilities.data import dim_zero_cat + + +class BinaryCalibrationError(Metric): + r"""`Top-label Calibration Error`_ for binary tasks. The expected calibration error can be used to quantify how + well a given model is calibrated, i.e. how well the predicted output probabilities of the model match the + actual probabilities of the ground truth distribution. + + Three different norms are implemented, each corresponding to variations on the calibration error metric. + + .. math:: + \text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)} + + .. math:: + \text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)} + + .. math:: + \text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)} + + Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of + predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`. Bins are constructed + in a uniform way in the [0,1] range. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)`` containing probabilities or logits for + each observation. If preds has values outside the [0,1] range we consider the input to be logits and will auto apply + sigmoid per element. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and + therefore should only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the + positive class. + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``bce`` (:class:`~torch.Tensor`): A scalar tensor containing the calibration error + + Additional dimension ``...`` will be flattened into the batch dimension. + + Args: + n_bins: Number of bins to use when computing the metric. + norm: Norm used to compare empirical and expected probability bins. + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
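+ + As a hand-worked check of the first doctest below (an editorial illustration, not part of the original API docs): with ``n_bins=2`` every sample's top-label confidence ``max(p, 1 - p)`` lands in the ``[0.5, 1]`` bin, the mean confidence is ``(0.75 + 0.75 + 0.55 + 0.75 + 0.75) / 5 = 0.71`` while the observed accuracy in that bin is ``1.0``, so the L1 (expected) calibration error is ``|1.0 - 0.71| = 0.29``.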
+ + Example: + >>> from torchmetrics.classification import BinaryCalibrationError + >>> preds = torch.tensor([0.25, 0.25, 0.55, 0.75, 0.75]) + >>> target = torch.tensor([0, 0, 1, 1, 1]) + >>> metric = BinaryCalibrationError(n_bins=2, norm='l1') + >>> metric(preds, target) + tensor(0.2900) + >>> bce = BinaryCalibrationError(n_bins=2, norm='l2') + >>> bce(preds, target) + tensor(0.2918) + >>> bce = BinaryCalibrationError(n_bins=2, norm='max') + >>> bce(preds, target) + tensor(0.3167) + """ + is_differentiable: bool = False + higher_is_better: bool = False + full_state_update: bool = False + + def __init__( + self, + n_bins: int = 15, + norm: Literal["l1", "l2", "max"] = "l1", + ignore_index: Optional[int] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> None: + super().__init__(**kwargs) + if validate_args: + _binary_calibration_error_arg_validation(n_bins, norm, ignore_index) + self.validate_args = validate_args + self.n_bins = n_bins + self.norm = norm + self.ignore_index = ignore_index + self.add_state("confidences", [], dist_reduce_fx="cat") + self.add_state("accuracies", [], dist_reduce_fx="cat") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + if self.validate_args: + _binary_calibration_error_tensor_validation(preds, target, self.ignore_index) + preds, target = _binary_confusion_matrix_format( + preds, target, threshold=0.0, ignore_index=self.ignore_index, convert_to_labels=False + ) + confidences, accuracies = _binary_calibration_error_update(preds, target) + self.confidences.append(confidences) + self.accuracies.append(accuracies) + + def compute(self) -> Tensor: + confidences = dim_zero_cat(self.confidences) + accuracies = dim_zero_cat(self.accuracies) + return _ce_compute(confidences, accuracies, self.n_bins, norm=self.norm) + + +class MulticlassCalibrationError(Metric): + r"""`Top-label Calibration Error`_ for multiclass tasks. The expected calibration error can be used to quantify + how well a given model is calibrated, i.e. how well the predicted output probabilities of the model match the + actual probabilities of the ground truth distribution. + + Three different norms are implemented, each corresponding to variations on the calibration error metric. + + .. math:: + \text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)} + + .. math:: + \text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)} + + .. math:: + \text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)} + + Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of + predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`. Bins are constructed + in a uniform way in the [0,1] range. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)`` containing probabilities or logits for + each observation. If preds has values outside the [0,1] range we consider the input to be logits and will auto apply + softmax per sample. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and + therefore should only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified). + + .. note:: + Additional dimension ``...`` will be flattened into the batch dimension.
+ + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``mcce`` (:class:`~torch.Tensor`): A scalar tensor containing the calibration error + + Args: + num_classes: Integer specifying the number of classes + n_bins: Number of bins to use when computing the metric. + norm: Norm used to compare empirical and expected probability bins. + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. + + Example: + >>> from torchmetrics.classification import MulticlassCalibrationError + >>> preds = torch.tensor([[0.25, 0.20, 0.55], + ... [0.55, 0.05, 0.40], + ... [0.10, 0.30, 0.60], + ... [0.90, 0.05, 0.05]]) + >>> target = torch.tensor([0, 1, 2, 0]) + >>> metric = MulticlassCalibrationError(num_classes=3, n_bins=3, norm='l1') + >>> metric(preds, target) + tensor(0.2000) + >>> mcce = MulticlassCalibrationError(num_classes=3, n_bins=3, norm='l2') + >>> mcce(preds, target) + tensor(0.2082) + >>> mcce = MulticlassCalibrationError(num_classes=3, n_bins=3, norm='max') + >>> mcce(preds, target) + tensor(0.2333) + """ + is_differentiable: bool = False + higher_is_better: bool = False + full_state_update: bool = False + + def __init__( + self, + num_classes: int, + n_bins: int = 15, + norm: Literal["l1", "l2", "max"] = "l1", + ignore_index: Optional[int] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> None: + super().__init__(**kwargs) + if validate_args: + _multiclass_calibration_error_arg_validation(num_classes, n_bins, norm, ignore_index) + self.validate_args = validate_args + self.num_classes = num_classes + self.n_bins = n_bins + self.norm = norm + self.ignore_index = ignore_index + self.add_state("confidences", [], dist_reduce_fx="cat") + self.add_state("accuracies", [], dist_reduce_fx="cat") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + if self.validate_args: + _multiclass_calibration_error_tensor_validation(preds, target, self.num_classes, self.ignore_index) + preds, target = _multiclass_confusion_matrix_format( + preds, target, ignore_index=self.ignore_index, convert_to_labels=False + ) + confidences, accuracies = _multiclass_calibration_error_update(preds, target) + self.confidences.append(confidences) + self.accuracies.append(accuracies) + + def compute(self) -> Tensor: + confidences = dim_zero_cat(self.confidences) + accuracies = dim_zero_cat(self.accuracies) + return _ce_compute(confidences, accuracies, self.n_bins, norm=self.norm) + + +class CalibrationError: + r"""`Top-label Calibration Error`_. The expected calibration error can be used to quantify how well a given + model is calibrated, i.e. how well the predicted output probabilities of the model match the actual + probabilities of the ground truth distribution. + + Three different norms are implemented, each corresponding to variations on the calibration error metric. + + .. math:: + \text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)} + + .. math:: + \text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)}
+ + .. math:: + \text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)} + + Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of + predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`. Bins are constructed + in a uniform way in the [0,1] range. + + This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the + ``task`` argument to either ``'binary'`` or ``'multiclass'``. See the documentation of + :mod:`BinaryCalibrationError` and :mod:`MulticlassCalibrationError` for the specific details of + each argument's influence and examples. + """ + + def __new__( + cls, + task: Literal["binary", "multiclass"], + n_bins: int = 15, + norm: Literal["l1", "l2", "max"] = "l1", + num_classes: Optional[int] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> Metric: + kwargs.update(dict(n_bins=n_bins, norm=norm, ignore_index=ignore_index, validate_args=validate_args)) + if task == "binary": + return BinaryCalibrationError(**kwargs) + if task == "multiclass": + assert isinstance(num_classes, int) + return MulticlassCalibrationError(num_classes, **kwargs) + raise ValueError( + f"Expected argument `task` to either be `'binary'` or `'multiclass'` but got {task}" + ) diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/classification/cohen_kappa.py b/wemm/lib/python3.10/site-packages/torchmetrics/classification/cohen_kappa.py new file mode 100644 index 0000000000000000000000000000000000000000..eaf469d8d5fb3e670dbb947204a41e9034fa9747 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/classification/cohen_kappa.py @@ -0,0 +1,232 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Optional + +import torch +from torch import Tensor +from typing_extensions import Literal + +from torchmetrics.classification import BinaryConfusionMatrix, MulticlassConfusionMatrix +from torchmetrics.functional.classification.cohen_kappa import ( + _binary_cohen_kappa_arg_validation, + _cohen_kappa_reduce, + _multiclass_cohen_kappa_arg_validation, +) +from torchmetrics.metric import Metric + + +class BinaryCohenKappa(BinaryConfusionMatrix): + r"""Calculates `Cohen's kappa score`_ that measures inter-annotator agreement for binary tasks. It is defined + as + + .. math:: + \kappa = (p_o - p_e) / (1 - p_e) + + where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is + the expected agreement when both annotators assign labels randomly. Note that + :math:`p_e` is estimated using a per-annotator empirical prior over the + class labels. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``.
If preds is a floating point + tensor with values outside the [0,1] range we consider the input to be logits and will auto apply sigmoid per element. + Additionally, we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. + + .. note:: + Additional dimension ``...`` will be flattened into the batch dimension. + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``bck`` (:class:`~torch.Tensor`): A tensor containing the Cohen's kappa score + + Args: + threshold: Threshold for transforming probability to binary (0,1) predictions + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + weights: Weighting type to calculate the score. Choose from: + + - ``None`` or ``'none'``: no weighting + - ``'linear'``: linear weighting + - ``'quadratic'``: quadratic weighting + + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. + + Example (preds is int tensor): + >>> from torchmetrics.classification import BinaryCohenKappa + >>> target = torch.tensor([1, 1, 0, 0]) + >>> preds = torch.tensor([0, 1, 0, 0]) + >>> metric = BinaryCohenKappa() + >>> metric(preds, target) + tensor(0.5000) + + Example (preds is float tensor): + >>> from torchmetrics.classification import BinaryCohenKappa + >>> target = torch.tensor([1, 1, 0, 0]) + >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01]) + >>> metric = BinaryCohenKappa() + >>> metric(preds, target) + tensor(0.5000) + """ + is_differentiable: bool = False + higher_is_better: bool = True + full_state_update: bool = False + + def __init__( + self, + threshold: float = 0.5, + ignore_index: Optional[int] = None, + weights: Optional[Literal["linear", "quadratic", "none"]] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> None: + super().__init__(threshold, ignore_index, normalize=None, validate_args=False, **kwargs) + if validate_args: + _binary_cohen_kappa_arg_validation(threshold, ignore_index, weights) + self.weights = weights + self.validate_args = validate_args + + def compute(self) -> Tensor: + return _cohen_kappa_reduce(self.confmat, self.weights) + + +class MulticlassCohenKappa(MulticlassConfusionMatrix): + r"""Calculates `Cohen's kappa score`_ that measures inter-annotator agreement for multiclass tasks. It is + defined as + + .. math:: + \kappa = (p_o - p_e) / (1 - p_e) + + where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is + the expected agreement when both annotators assign labels randomly. Note that + :math:`p_e` is estimated using a per-annotator empirical prior over the + class labels. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): Either an int tensor of shape ``(N, ...)`` or float tensor of shape + ``(N, C, ...)``. If preds is a floating point we apply ``torch.argmax`` along the ``C`` dimension to automatically + convert probabilities/logits into an int tensor. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. + + .. note:: + Additional dimension ``...`` will be flattened into the batch dimension.
+ + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``mcck`` (:class:`~torch.Tensor`): A tensor containing the Cohen's kappa score + + Args: + num_classes: Integer specifying the number of classes + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + weights: Weighting type to calculate the score. Choose from: + + - ``None`` or ``'none'``: no weighting + - ``'linear'``: linear weighting + - ``'quadratic'``: quadratic weighting + + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. + + Example (pred is integer tensor): + >>> from torchmetrics.classification import MulticlassCohenKappa + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([2, 1, 0, 1]) + >>> metric = MulticlassCohenKappa(num_classes=3) + >>> metric(preds, target) + tensor(0.6364) + + Example (pred is float tensor): + >>> from torchmetrics.classification import MulticlassCohenKappa + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([ + ... [0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13], + ... ]) + >>> metric = MulticlassCohenKappa(num_classes=3) + >>> metric(preds, target) + tensor(0.6364) + """ + is_differentiable: bool = False + higher_is_better: bool = True + full_state_update: bool = False + + def __init__( + self, + num_classes: int, + ignore_index: Optional[int] = None, + weights: Optional[Literal["linear", "quadratic", "none"]] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> None: + super().__init__(num_classes, ignore_index, normalize=None, validate_args=False, **kwargs) + if validate_args: + _multiclass_cohen_kappa_arg_validation(num_classes, ignore_index, weights) + self.weights = weights + self.validate_args = validate_args + + def compute(self) -> Tensor: + return _cohen_kappa_reduce(self.confmat, self.weights) + + +class CohenKappa: + r"""Calculates `Cohen's kappa score`_ that measures inter-annotator agreement. It is defined as + + .. math:: + \kappa = (p_o - p_e) / (1 - p_e) + + where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is + the expected agreement when both annotators assign labels randomly. Note that + :math:`p_e` is estimated using a per-annotator empirical prior over the + class labels. + + This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the + ``task`` argument to either ``'binary'`` or ``'multiclass'``. See the documentation of + :mod:`BinaryCohenKappa` and :mod:`MulticlassCohenKappa` for the specific details of + each argument's influence and examples.
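+ + As an illustrative worked computation (editorial addition): in the Legacy Example below the two label sequences agree on 3 of 4 samples, so :math:`p_o = 0.75`; the marginals of ``preds`` are (0.75, 0.25) and of ``target`` are (0.5, 0.5), giving :math:`p_e = 0.75 \cdot 0.5 + 0.25 \cdot 0.5 = 0.5` and hence :math:`\kappa = (0.75 - 0.5) / (1 - 0.5) = 0.5`.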
+ + Legacy Example: + >>> target = torch.tensor([1, 1, 0, 0]) + >>> preds = torch.tensor([0, 1, 0, 0]) + >>> cohenkappa = CohenKappa(task="multiclass", num_classes=2) + >>> cohenkappa(preds, target) + tensor(0.5000) + """ + + def __new__( + cls, + task: Literal["binary", "multiclass"], + threshold: float = 0.5, + num_classes: Optional[int] = None, + weights: Optional[Literal["linear", "quadratic", "none"]] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> Metric: + kwargs.update(dict(weights=weights, ignore_index=ignore_index, validate_args=validate_args)) + if task == "binary": + return BinaryCohenKappa(threshold, **kwargs) + if task == "multiclass": + assert isinstance(num_classes, int) + return MulticlassCohenKappa(num_classes, **kwargs) + raise ValueError( + f"Expected argument `task` to either be `'binary'` or `'multiclass'` but got {task}" + ) diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/classification/confusion_matrix.py b/wemm/lib/python3.10/site-packages/torchmetrics/classification/confusion_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..d6dc330b54c71c400d958236df0ad9253a20301d --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/classification/confusion_matrix.py @@ -0,0 +1,375 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Optional + +import torch +from torch import Tensor +from typing_extensions import Literal + +from torchmetrics.functional.classification.confusion_matrix import ( + _binary_confusion_matrix_arg_validation, + _binary_confusion_matrix_compute, + _binary_confusion_matrix_format, + _binary_confusion_matrix_tensor_validation, + _binary_confusion_matrix_update, + _multiclass_confusion_matrix_arg_validation, + _multiclass_confusion_matrix_compute, + _multiclass_confusion_matrix_format, + _multiclass_confusion_matrix_tensor_validation, + _multiclass_confusion_matrix_update, + _multilabel_confusion_matrix_arg_validation, + _multilabel_confusion_matrix_compute, + _multilabel_confusion_matrix_format, + _multilabel_confusion_matrix_tensor_validation, + _multilabel_confusion_matrix_update, +) +from torchmetrics.metric import Metric + + +class BinaryConfusionMatrix(Metric): + r"""Computes the `confusion matrix`_ for binary tasks. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point + tensor with values outside the [0,1] range we consider the input to be logits and will auto apply sigmoid per + element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. + + .. note:: + Additional dimension ``...`` will be flattened into the batch dimension.
+ + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``bcm`` (:class:`~torch.Tensor`): A tensor containing a ``(2, 2)`` matrix + + Args: + threshold: Threshold for transforming probability to binary (0,1) predictions + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + normalize: Normalization mode for confusion matrix. Choose from: + + - ``None`` or ``'none'``: no normalization (default) + - ``'true'``: normalization over the targets (most commonly used) + - ``'pred'``: normalization over the predictions + - ``'all'``: normalization over the whole matrix + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. + + Example (preds is int tensor): + >>> from torchmetrics.classification import BinaryConfusionMatrix + >>> target = torch.tensor([1, 1, 0, 0]) + >>> preds = torch.tensor([0, 1, 0, 0]) + >>> bcm = BinaryConfusionMatrix() + >>> bcm(preds, target) + tensor([[2, 0], + [1, 1]]) + + Example (preds is float tensor): + >>> from torchmetrics.classification import BinaryConfusionMatrix + >>> target = torch.tensor([1, 1, 0, 0]) + >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01]) + >>> bcm = BinaryConfusionMatrix() + >>> bcm(preds, target) + tensor([[2, 0], + [1, 1]]) + """ + is_differentiable: bool = False + higher_is_better: Optional[bool] = None + full_state_update: bool = False + + def __init__( + self, + threshold: float = 0.5, + ignore_index: Optional[int] = None, + normalize: Optional[Literal["true", "pred", "all", "none"]] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> None: + super().__init__(**kwargs) + if validate_args: + _binary_confusion_matrix_arg_validation(threshold, ignore_index, normalize) + self.threshold = threshold + self.ignore_index = ignore_index + self.normalize = normalize + self.validate_args = validate_args + + self.add_state("confmat", torch.zeros(2, 2, dtype=torch.long), dist_reduce_fx="sum") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets.""" + if self.validate_args: + _binary_confusion_matrix_tensor_validation(preds, target, self.ignore_index) + preds, target = _binary_confusion_matrix_format(preds, target, self.threshold, self.ignore_index) + confmat = _binary_confusion_matrix_update(preds, target) + self.confmat += confmat + + def compute(self) -> Tensor: + """Computes confusion matrix.""" + return _binary_confusion_matrix_compute(self.confmat, self.normalize) + + +class MulticlassConfusionMatrix(Metric): + r"""Computes the `confusion matrix`_ for multiclass tasks. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ...)``. + If preds is a floating point tensor we apply ``torch.argmax`` along the ``C`` dimension to automatically convert + probabilities/logits into an int tensor. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. + + .. note:: + Additional dimension ``...`` will be flattened into the batch dimension.
+ + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``mccm`` (:class:`~torch.Tensor`): A tensor containing a ``(num_classes, num_classes)`` matrix + + Args: + num_classes: Integer specifying the number of classes + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + normalize: Normalization mode for confusion matrix. Choose from: + + - ``None`` or ``'none'``: no normalization (default) + - ``'true'``: normalization over the targets (most commonly used) + - ``'pred'``: normalization over the predictions + - ``'all'``: normalization over the whole matrix + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. + + Example (pred is integer tensor): + >>> from torchmetrics.classification import MulticlassConfusionMatrix + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([2, 1, 0, 1]) + >>> metric = MulticlassConfusionMatrix(num_classes=3) + >>> metric(preds, target) + tensor([[1, 1, 0], + [0, 1, 0], + [0, 0, 1]]) + + Example (pred is float tensor): + >>> from torchmetrics.classification import MulticlassConfusionMatrix + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([ + ... [0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13], + ...
]) + >>> metric = MulticlassConfusionMatrix(num_classes=3) + >>> metric(preds, target) + tensor([[1, 1, 0], + [0, 1, 0], + [0, 0, 1]]) + """ + is_differentiable: bool = False + higher_is_better: Optional[bool] = None + full_state_update: bool = False + + def __init__( + self, + num_classes: int, + ignore_index: Optional[int] = None, + normalize: Optional[Literal["none", "true", "pred", "all"]] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> None: + super().__init__(**kwargs) + if validate_args: + _multiclass_confusion_matrix_arg_validation(num_classes, ignore_index, normalize) + self.num_classes = num_classes + self.ignore_index = ignore_index + self.normalize = normalize + self.validate_args = validate_args + + self.add_state("confmat", torch.zeros(num_classes, num_classes, dtype=torch.long), dist_reduce_fx="sum") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets.""" + if self.validate_args: + _multiclass_confusion_matrix_tensor_validation(preds, target, self.num_classes, self.ignore_index) + preds, target = _multiclass_confusion_matrix_format(preds, target, self.ignore_index) + confmat = _multiclass_confusion_matrix_update(preds, target, self.num_classes) + self.confmat += confmat + + def compute(self) -> Tensor: + """Computes confusion matrix.""" + return _multiclass_confusion_matrix_compute(self.confmat, self.normalize) + + +class MultilabelConfusionMatrix(Metric): + r"""Computes the `confusion matrix`_ for multilabel tasks. + + As input to 'update' the metric accepts the following input: + + - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside the + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally, + we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (int tensor): ``(N, C, ...)`` + + Additional dimension ``...`` will be flattened into the batch dimension. + + As output of 'compute' the metric returns the following output: + + - ``confusion matrix``: [num_labels, 2, 2] matrix + + Args: + num_labels: Integer specifying the number of labels + threshold: Threshold for transforming probability to binary (0,1) predictions + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + normalize: Normalization mode for confusion matrix. Choose from: + + - ``None`` or ``'none'``: no normalization (default) + - ``'true'``: normalization over the targets (most commonly used) + - ``'pred'``: normalization over the predictions + - ``'all'``: normalization over the whole matrix + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
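+ + Example (editorial sketch of ``normalize='true'``, i.e. row-normalizing each per-label matrix over the targets; the exact printed formatting may differ slightly across versions): + >>> from torchmetrics.classification import MultilabelConfusionMatrix + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> metric = MultilabelConfusionMatrix(num_labels=3, normalize='true') + >>> metric(preds, target) + tensor([[[1., 0.], [0., 1.]], + [[1., 0.], [1., 0.]], + [[0., 1.], [0., 1.]]])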
+ + Example (preds is int tensor): + >>> from torchmetrics.classification import MultilabelConfusionMatrix + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> metric = MultilabelConfusionMatrix(num_labels=3) + >>> metric(preds, target) + tensor([[[1, 0], [0, 1]], + [[1, 0], [1, 0]], + [[0, 1], [0, 1]]]) + + Example (preds is float tensor): + >>> from torchmetrics.classification import MultilabelConfusionMatrix + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> metric = MultilabelConfusionMatrix(num_labels=3) + >>> metric(preds, target) + tensor([[[1, 0], [0, 1]], + [[1, 0], [1, 0]], + [[0, 1], [0, 1]]]) + """ + is_differentiable: bool = False + higher_is_better: Optional[bool] = None + full_state_update: bool = False + + def __init__( + self, + num_labels: int, + threshold: float = 0.5, + ignore_index: Optional[int] = None, + normalize: Optional[Literal["none", "true", "pred", "all"]] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> None: + super().__init__(**kwargs) + if validate_args: + _multilabel_confusion_matrix_arg_validation(num_labels, threshold, ignore_index, normalize) + self.num_labels = num_labels + self.threshold = threshold + self.ignore_index = ignore_index + self.normalize = normalize + self.validate_args = validate_args + + self.add_state("confmat", torch.zeros(num_labels, 2, 2, dtype=torch.long), dist_reduce_fx="sum") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + """Update state with predictions and targets.""" + if self.validate_args: + _multilabel_confusion_matrix_tensor_validation(preds, target, self.num_labels, self.ignore_index) + preds, target = _multilabel_confusion_matrix_format( + preds, target, self.num_labels, self.threshold, self.ignore_index + ) + confmat = _multilabel_confusion_matrix_update(preds, target, self.num_labels) + self.confmat += confmat + + def compute(self) -> Tensor: + """Computes confusion matrix.""" + return _multilabel_confusion_matrix_compute(self.confmat, self.normalize) + + +class ConfusionMatrix: + r"""Computes the `confusion matrix`_. + + This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the + ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of + :mod:`BinaryConfusionMatrix`, :mod:`MulticlassConfusionMatrix` and :mod:`MultilabelConfusionMatrix` for + the specific details of each argument's influence and examples.
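+ + Because ``__new__`` dispatches on ``task``, the returned object is an instance of the corresponding task-specific class (an editorial sketch of the dispatch behaviour): + >>> from torchmetrics import ConfusionMatrix + >>> type(ConfusionMatrix(task="binary")).__name__ + 'BinaryConfusionMatrix'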
+ + Legacy Example: + >>> target = torch.tensor([1, 1, 0, 0]) + >>> preds = torch.tensor([0, 1, 0, 0]) + >>> confmat = ConfusionMatrix(task="binary", num_classes=2) + >>> confmat(preds, target) + tensor([[2, 0], + [1, 1]]) + + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([2, 1, 0, 1]) + >>> confmat = ConfusionMatrix(task="multiclass", num_classes=3) + >>> confmat(preds, target) + tensor([[1, 1, 0], + [0, 1, 0], + [0, 0, 1]]) + + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> confmat = ConfusionMatrix(task="multilabel", num_labels=3) + >>> confmat(preds, target) + tensor([[[1, 0], [0, 1]], + [[1, 0], [1, 0]], + [[0, 1], [0, 1]]]) + """ + + def __new__( + cls, + task: Literal["binary", "multiclass", "multilabel"], + threshold: float = 0.5, + num_classes: Optional[int] = None, + num_labels: Optional[int] = None, + normalize: Optional[Literal["true", "pred", "all", "none"]] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> Metric: + kwargs.update(dict(normalize=normalize, ignore_index=ignore_index, validate_args=validate_args)) + if task == "binary": + return BinaryConfusionMatrix(threshold, **kwargs) + if task == "multiclass": + assert isinstance(num_classes, int) + return MulticlassConfusionMatrix(num_classes, **kwargs) + if task == "multilabel": + assert isinstance(num_labels, int) + return MultilabelConfusionMatrix(num_labels, threshold, **kwargs) + raise ValueError( + f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}" + ) diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/classification/dice.py b/wemm/lib/python3.10/site-packages/torchmetrics/classification/dice.py new file mode 100644 index 0000000000000000000000000000000000000000..6846e9eb5be4c27e1ad8860a802ba7647113d31d --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/classification/dice.py @@ -0,0 +1,237 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Optional, Tuple, no_type_check + +import torch +from torch import Tensor +from typing_extensions import Literal + +from torchmetrics.functional.classification.dice import _dice_compute +from torchmetrics.functional.classification.stat_scores import _stat_scores_update +from torchmetrics.metric import Metric +from torchmetrics.utilities.enums import AverageMethod, MDMCAverageMethod + + +class Dice(Metric): + r"""Computes `Dice`_: + + .. math:: \text{Dice} = \frac{\text{2 * TP}}{\text{2 * TP} + \text{FP} + \text{FN}} + + Where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives, + false positives and false negatives respectively. + + It is recommended to set `ignore_index` to the index of the background class.
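+ + For intuition (an editorial worked example): in the Example further below, ``preds = [2, 0, 2, 1]`` and ``target = [1, 1, 2, 0]`` agree on exactly one sample, so under micro averaging :math:`TP = 1`, :math:`FP = FN = 3` and :math:`\text{Dice} = 2 \cdot 1 / (2 \cdot 1 + 3 + 3) = 0.25`.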
+ + The reduction method (how the Dice scores are aggregated) is controlled by the + ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the + multi-dimensional multi-class case. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): Predictions from model (probabilities, logits or labels) + - ``target`` (:class:`~torch.Tensor`): Ground truth values + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``dice`` (:class:`~torch.Tensor`): A tensor containing the dice score. + + - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned + - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number of classes + + Args: + num_classes: + Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. + threshold: + Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. + zero_division: + The value to use for the score if the denominator equals zero. + average: + Defines the reduction that is applied. Should be one of the following: + + - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. + - ``'macro'``: Calculate the metric for each class separately, and average the + metrics across classes (with equal weights for each class). + - ``'weighted'``: Calculate the metric for each class separately, and average the + metrics across classes, weighting each class by its support (``tp + fn``). + - ``'none'`` or ``None``: Calculate the metric for each class separately, and return + the metric for every class. + - ``'samples'``: Calculate the metric for each sample, and average the metrics + across samples (with equal weights for each sample). + + .. note:: + What is considered a sample in the multi-dimensional multi-class case + depends on the value of ``mdmc_average``. + + mdmc_average: + Defines how averaging is done for multi-dimensional multi-class inputs (on top of the + ``average`` parameter). Should be one of the following: + + - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional + multi-class. + + - ``'samplewise'``: In this case, the statistics are computed separately for each + sample on the ``N`` axis, and then averaged over samples. + The computation for each sample is done by treating the flattened extra axes ``...`` + as the ``N`` dimension within the sample, + and computing the metric for the sample based on that. + + - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs + are flattened into a new ``N_X`` sample axis, i.e. + the inputs are treated as if they were ``(N_X, C)``. + From here on the ``average`` parameter applies as usual. + + ignore_index: + Integer specifying a target class to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` + or ``'none'``, the score for the ignored class will be returned as ``nan``. + + top_k: + Number of the highest probability or logit score predictions considered when finding the correct label, + relevant only for (multi-dimensional) multi-class inputs. The + default value (``None``) will be interpreted as 1 for these inputs.
+ Should be left at default (``None``) for all other types of inputs. + + multiclass: + Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be. + + kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. + + Raises: + ValueError: + If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``. + ValueError: + If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``. + ValueError: + If ``average`` is set but ``num_classes`` is not provided. + ValueError: + If ``num_classes`` is set and ``ignore_index`` is not in the range ``[0, num_classes)``. + + Example: + >>> import torch + >>> from torchmetrics import Dice + >>> preds = torch.tensor([2, 0, 2, 1]) + >>> target = torch.tensor([1, 1, 2, 0]) + >>> dice = Dice(average='micro') + >>> dice(preds, target) + tensor(0.2500) + """ + is_differentiable: bool = False + higher_is_better: bool = True + full_state_update: bool = False + + @no_type_check + def __init__( + self, + zero_division: int = 0, + num_classes: Optional[int] = None, + threshold: float = 0.5, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro", + mdmc_average: Optional[str] = "global", + ignore_index: Optional[int] = None, + top_k: Optional[int] = None, + multiclass: Optional[bool] = None, + **kwargs: Any, + ) -> None: + super().__init__(**kwargs) + allowed_average = ("micro", "macro", "weighted", "samples", "none", None) + if average not in allowed_average: + raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.") + + _reduce_options = (AverageMethod.WEIGHTED, AverageMethod.NONE, None) + if "reduce" not in kwargs: + kwargs["reduce"] = AverageMethod.MACRO if average in _reduce_options else average + if "mdmc_reduce" not in kwargs: + kwargs["mdmc_reduce"] = mdmc_average + + # use the mapped reduction: `weighted`/`none`/None collect per-class statistics like `macro` + self.reduce = kwargs["reduce"] + self.mdmc_reduce = kwargs["mdmc_reduce"] + self.num_classes = num_classes + self.threshold = threshold + self.multiclass = multiclass + self.ignore_index = ignore_index + self.top_k = top_k + + if self.reduce not in ("micro", "macro", "samples"): + raise ValueError(f"The `reduce` {self.reduce} is not valid.") + + if self.mdmc_reduce not in (None, "samplewise", "global"): + raise ValueError(f"The `mdmc_reduce` {self.mdmc_reduce} is not valid.") + + if self.reduce == "macro" and (not num_classes or num_classes < 1): + raise ValueError("When you set `average` as 'macro', you have to provide the number of classes.") + + if num_classes and ignore_index is not None and (not ignore_index < num_classes or num_classes == 1): + raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes") + + default: Callable = lambda: [] + reduce_fn: Optional[str] = "cat" + if self.mdmc_reduce != "samplewise" and self.reduce != "samples": + if self.reduce == "micro": + zeros_shape = [] + elif self.reduce == "macro": + zeros_shape = [num_classes] + else: + raise ValueError(f'Wrong reduce="{self.reduce}"') + default = lambda: torch.zeros(zeros_shape, dtype=torch.long) + reduce_fn = "sum" + + for s in ("tp", "fp", "tn", "fn"): + self.add_state(s, default=default(), dist_reduce_fx=reduce_fn) + + self.average = average + self.zero_division = zero_division + + @no_type_check + def update(self, preds: Tensor, target: Tensor) -> None: + """Update state with predictions and targets.""" + tp, fp, tn, fn = _stat_scores_update( + preds, + target, + reduce=self.reduce, + mdmc_reduce=self.mdmc_reduce, + threshold=self.threshold,
num_classes=self.num_classes, + top_k=self.top_k, + multiclass=self.multiclass, + ignore_index=self.ignore_index, + ) + + # Update states + if self.reduce != AverageMethod.SAMPLES and self.mdmc_reduce != MDMCAverageMethod.SAMPLEWISE: + self.tp += tp + self.fp += fp + self.tn += tn + self.fn += fn + else: + self.tp.append(tp) + self.fp.append(fp) + self.tn.append(tn) + self.fn.append(fn) + + @no_type_check + def _get_final_stats(self) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + """Performs concatenation on the stat scores if necessary, before passing them to a compute function.""" + tp = torch.cat(self.tp) if isinstance(self.tp, list) else self.tp + fp = torch.cat(self.fp) if isinstance(self.fp, list) else self.fp + tn = torch.cat(self.tn) if isinstance(self.tn, list) else self.tn + fn = torch.cat(self.fn) if isinstance(self.fn, list) else self.fn + return tp, fp, tn, fn + + @no_type_check + def compute(self) -> Tensor: + """Computes metric.""" + tp, fp, _, fn = self._get_final_stats() + return _dice_compute(tp, fp, fn, self.average, self.mdmc_reduce, self.zero_division) diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/classification/hamming.py b/wemm/lib/python3.10/site-packages/torchmetrics/classification/hamming.py new file mode 100644 index 0000000000000000000000000000000000000000..3620d899ed2bcf97c86c6a95b426f5ca8c5d556a --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/classification/hamming.py @@ -0,0 +1,368 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Optional + +import torch +from torch import Tensor +from typing_extensions import Literal + +from torchmetrics.classification.stat_scores import BinaryStatScores, MulticlassStatScores, MultilabelStatScores +from torchmetrics.functional.classification.hamming import _hamming_distance_reduce +from torchmetrics.metric import Metric + + +class BinaryHammingDistance(BinaryStatScores): + r"""Computes the average `Hamming distance`_ (also known as Hamming loss) for binary tasks: + + .. math:: + \text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il}) + + Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions, + and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that + tensor. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point + tensor with values outside the [0,1] range we consider the input to be logits and will auto apply sigmoid per + element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
+ + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``bhd`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``multidim_average`` argument: + + - If ``multidim_average`` is set to ``global``, the metric returns a scalar value. + - If ``multidim_average`` is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a + scalar value per sample. + + Args: + threshold: Threshold for transforming probability to binary {0,1} predictions + multidim_average: + Defines how additional dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flattened along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Example (preds is int tensor): + >>> from torchmetrics.classification import BinaryHammingDistance + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> metric = BinaryHammingDistance() + >>> metric(preds, target) + tensor(0.3333) + + Example (preds is float tensor): + >>> from torchmetrics.classification import BinaryHammingDistance + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> metric = BinaryHammingDistance() + >>> metric(preds, target) + tensor(0.3333) + + Example (multidim tensors): + >>> from torchmetrics.classification import BinaryHammingDistance + >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = torch.tensor( + ... [ + ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], + ... ] + ... ) + >>> metric = BinaryHammingDistance(multidim_average='samplewise') + >>> metric(preds, target) + tensor([0.6667, 0.8333]) + """ + + is_differentiable: bool = False + higher_is_better: bool = False + full_state_update: bool = False + + def compute(self) -> Tensor: + tp, fp, tn, fn = self._final_state() + return _hamming_distance_reduce(tp, fp, tn, fn, average="binary", multidim_average=self.multidim_average) + + +class MulticlassHammingDistance(MulticlassStatScores): + r"""Computes the average `Hamming distance`_ (also known as Hamming loss) for multiclass tasks: + + .. math:: + \text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il}) + + Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions, + and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that + tensor. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ...)``. + If preds is a floating point we apply ``torch.argmax`` along the ``C`` dimension to automatically convert + probabilities/logits into an int tensor. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
+ + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``mchd`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and + ``multidim_average`` arguments: + + - If ``multidim_average`` is set to ``global``: + + - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor + - If ``average=None/'none'``, the shape will be ``(C,)`` + + - If ``multidim_average`` is set to ``samplewise``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)`` + - If ``average=None/'none'``, the shape will be ``(N, C)`` + + Args: + num_classes: Integer specifying the number of classes + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + top_k: + Number of highest probability or logit score predictions considered to find the correct label. + Only works when ``preds`` contain probabilities/logits. + multidim_average: + Defines how additional dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flattened along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Example (preds is int tensor): + >>> from torchmetrics.classification import MulticlassHammingDistance + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([2, 1, 0, 1]) + >>> metric = MulticlassHammingDistance(num_classes=3) + >>> metric(preds, target) + tensor(0.1667) + >>> mchd = MulticlassHammingDistance(num_classes=3, average=None) + >>> mchd(preds, target) + tensor([0.5000, 0.0000, 0.0000]) + + Example (preds is float tensor): + >>> from torchmetrics.classification import MulticlassHammingDistance + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([ + ... [0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13], + ...
]) + >>> metric = MulticlassHammingDistance(num_classes=3) + >>> metric(preds, target) + tensor(0.1667) + >>> mchd = MulticlassHammingDistance(num_classes=3, average=None) + >>> mchd(preds, target) + tensor([0.5000, 0.0000, 0.0000]) + + Example (multidim tensors): + >>> from torchmetrics.classification import MulticlassHammingDistance + >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> metric = MulticlassHammingDistance(num_classes=3, multidim_average='samplewise') + >>> metric(preds, target) + tensor([0.5000, 0.7222]) + >>> mchd = MulticlassHammingDistance(num_classes=3, multidim_average='samplewise', average=None) + >>> mchd(preds, target) + tensor([[0.0000, 1.0000, 0.5000], + [1.0000, 0.6667, 0.5000]]) + """ + + is_differentiable: bool = False + higher_is_better: bool = False + full_state_update: bool = False + + def compute(self) -> Tensor: + tp, fp, tn, fn = self._final_state() + return _hamming_distance_reduce(tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average) + + +class MultilabelHammingDistance(MultilabelStatScores): + r"""Computes the average `Hamming distance`_ (also known as Hamming loss) for multilabel tasks: + + .. math:: + \text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il}) + + Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions, + and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that + tensor. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, C, ...)``. If preds is a + floating point tensor with values outside [0,1] range we consider the input to be logits and will auto + apply sigmoid per element. Addtionally, we convert to int tensor with thresholding using the value in + ``threshold``. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. + + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``mlhd`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and + ``multidim_average`` arguments: + + - If ``multidim_average`` is set to ``global``: + + - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor + - If ``average=None/'none'``, the shape will be ``(C,)`` + + - If ``multidim_average`` is set to ``samplewise``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)`` + - If ``average=None/'none'``, the shape will be ``(N, C)`` + + Args: + num_labels: Integer specifing the number of labels + threshold: Threshold for transforming probability to binary (0,1) predictions + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. 
+          The statistics in this case are calculated over the additional dimensions.
+
+        ignore_index:
+            Specifies a target value that is ignored and does not contribute to the metric calculation
+        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+            Set to ``False`` for faster computations.
+
+    Example (preds is int tensor):
+        >>> from torchmetrics.classification import MultilabelHammingDistance
+        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
+        >>> metric = MultilabelHammingDistance(num_labels=3)
+        >>> metric(preds, target)
+        tensor(0.3333)
+        >>> mlhd = MultilabelHammingDistance(num_labels=3, average=None)
+        >>> mlhd(preds, target)
+        tensor([0.0000, 0.5000, 0.5000])
+
+    Example (preds is float tensor):
+        >>> from torchmetrics.classification import MultilabelHammingDistance
+        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
+        >>> metric = MultilabelHammingDistance(num_labels=3)
+        >>> metric(preds, target)
+        tensor(0.3333)
+        >>> mlhd = MultilabelHammingDistance(num_labels=3, average=None)
+        >>> mlhd(preds, target)
+        tensor([0.0000, 0.5000, 0.5000])
+
+    Example (multidim tensors):
+        >>> from torchmetrics.classification import MultilabelHammingDistance
+        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
+        >>> preds = torch.tensor(
+        ...     [
+        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
+        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
+        ...     ]
+        ... )
+        >>> metric = MultilabelHammingDistance(num_labels=3, multidim_average='samplewise')
+        >>> metric(preds, target)
+        tensor([0.6667, 0.8333])
+        >>> mlhd = MultilabelHammingDistance(num_labels=3, multidim_average='samplewise', average=None)
+        >>> mlhd(preds, target)
+        tensor([[0.5000, 0.5000, 1.0000],
+                [1.0000, 1.0000, 0.5000]])
+    """
+
+    is_differentiable: bool = False
+    higher_is_better: bool = False
+    full_state_update: bool = False
+
+    def compute(self) -> Tensor:
+        tp, fp, tn, fn = self._final_state()
+        return _hamming_distance_reduce(
+            tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average, multilabel=True
+        )
+
+
+class HammingDistance:
+    r"""Computes the average `Hamming distance`_ (also known as Hamming loss):
+
+    .. math::
+        \text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il})
+
+    Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions,
+    and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that
+    tensor.
+
+    This class is a simple wrapper that dispatches to the task-specific versions of this metric, selected by setting
+    the ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
+    :class:`BinaryHammingDistance`, :class:`MulticlassHammingDistance` and :class:`MultilabelHammingDistance` for the
+    specific details of each argument's influence and examples.
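+
+    For instance, dispatching with ``task="binary"`` behaves like :class:`BinaryHammingDistance`; a minimal
+    sketch with hypothetical values (two of the four positions disagree):
+
+        >>> import torch
+        >>> target = torch.tensor([0, 1, 0, 1])
+        >>> preds = torch.tensor([0, 0, 1, 1])
+        >>> HammingDistance(task="binary")(preds, target)
+        tensor(0.5000)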
+ + Legacy Example: + >>> target = torch.tensor([[0, 1], [1, 1]]) + >>> preds = torch.tensor([[0, 1], [0, 1]]) + >>> hamming_distance = HammingDistance(task="multilabel", num_labels=2) + >>> hamming_distance(preds, target) + tensor(0.2500) + """ + + def __new__( + cls, + task: Literal["binary", "multiclass", "multilabel"], + threshold: float = 0.5, + num_classes: Optional[int] = None, + num_labels: Optional[int] = None, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro", + multidim_average: Optional[Literal["global", "samplewise"]] = "global", + top_k: Optional[int] = 1, + ignore_index: Optional[int] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> Metric: + + assert multidim_average is not None + kwargs.update(dict(multidim_average=multidim_average, ignore_index=ignore_index, validate_args=validate_args)) + if task == "binary": + return BinaryHammingDistance(threshold, **kwargs) + if task == "multiclass": + assert isinstance(num_classes, int) + assert isinstance(top_k, int) + return MulticlassHammingDistance(num_classes, top_k, average, **kwargs) + if task == "multilabel": + assert isinstance(num_labels, int) + return MultilabelHammingDistance(num_labels, threshold, average, **kwargs) + raise ValueError( + f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}" + ) diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/classification/precision_recall.py b/wemm/lib/python3.10/site-packages/torchmetrics/classification/precision_recall.py new file mode 100644 index 0000000000000000000000000000000000000000..8947534553c98bbdb52d555aeca462af7ec19a6e --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/classification/precision_recall.py @@ -0,0 +1,701 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Optional + +import torch +from torch import Tensor +from typing_extensions import Literal + +from torchmetrics.classification.stat_scores import BinaryStatScores, MulticlassStatScores, MultilabelStatScores +from torchmetrics.functional.classification.precision_recall import _precision_recall_reduce +from torchmetrics.metric import Metric + + +class BinaryPrecision(BinaryStatScores): + r"""Computes `Precision`_ for binary tasks: + + .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}} + + Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and + false positives respecitively. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): A int or float tensor of shape ``(N, ...)``. If preds is a floating point + tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per + element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. 
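+
+    The conversion applied to floating point ``preds`` is roughly equivalent to this minimal sketch (hypothetical
+    logits, default ``threshold=0.5``):
+
+        >>> import torch
+        >>> logits = torch.tensor([-1.0, 2.0, 0.3])
+        >>> (torch.sigmoid(logits) > 0.5).long()
+        tensor([0, 1, 1])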
+ + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``bp`` (:class:`~torch.Tensor`): If ``multidim_average`` is set to ``global``, the metric returns a scalar + value. If ``multidim_average`` is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a + scalar value per sample. + + Args: + threshold: Threshold for transforming probability to binary {0,1} predictions + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Example (preds is int tensor): + >>> from torchmetrics.classification import BinaryPrecision + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> metric = BinaryPrecision() + >>> metric(preds, target) + tensor(0.6667) + + Example (preds is float tensor): + >>> from torchmetrics.classification import BinaryPrecision + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> metric = BinaryPrecision() + >>> metric(preds, target) + tensor(0.6667) + + Example (multidim tensors): + >>> from torchmetrics.classification import BinaryPrecision + >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = torch.tensor( + ... [ + ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], + ... ] + ... ) + >>> metric = BinaryPrecision(multidim_average='samplewise') + >>> metric(preds, target) + tensor([0.4000, 0.0000]) + """ + is_differentiable: bool = False + higher_is_better: Optional[bool] = True + full_state_update: bool = False + + def compute(self) -> Tensor: + tp, fp, tn, fn = self._final_state() + return _precision_recall_reduce( + "precision", tp, fp, tn, fn, average="binary", multidim_average=self.multidim_average + ) + + +class MulticlassPrecision(MulticlassStatScores): + r"""Computes `Precision`_ for multiclass tasks. + + .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}} + + Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and + false positives respecitively. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ..)``. + If preds is a floating point we apply ``torch.argmax`` along the ``C`` dimension to automatically convert + probabilities/logits into an int tensor. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. 
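+
+    As a point of reference for the ``average`` argument below, ``average='macro'`` is the unweighted mean of the
+    per-class scores; a minimal sketch using the per-class values from the examples below:
+
+        >>> import torch
+        >>> per_class = torch.tensor([1.0, 0.5, 1.0])
+        >>> per_class.mean()
+        tensor(0.8333)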
+ + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``mcp`` (:class:`~torch.Tensor`): The returned shape depends on the ``average`` and ``multidim_average`` + arguments: + + - If ``multidim_average`` is set to ``global``: + + - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor + - If ``average=None/'none'``, the shape will be ``(C,)`` + + - If ``multidim_average`` is set to ``samplewise``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)`` + - If ``average=None/'none'``, the shape will be ``(N, C)`` + + Args: + num_classes: Integer specifing the number of classes + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + top_k: + Number of highest probability or logit score predictions considered to find the correct label. + Only works when ``preds`` contain probabilities/logits. + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Example (preds is int tensor): + >>> from torchmetrics.classification import MulticlassPrecision + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([2, 1, 0, 1]) + >>> metric = MulticlassPrecision(num_classes=3) + >>> metric(preds, target) + tensor(0.8333) + >>> mcp = MulticlassPrecision(num_classes=3, average=None) + >>> mcp(preds, target) + tensor([1.0000, 0.5000, 1.0000]) + + Example (preds is float tensor): + >>> from torchmetrics.classification import MulticlassPrecision + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([ + ... [0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13], + ... 
]) + >>> metric = MulticlassPrecision(num_classes=3) + >>> metric(preds, target) + tensor(0.8333) + >>> mcp = MulticlassPrecision(num_classes=3, average=None) + >>> mcp(preds, target) + tensor([1.0000, 0.5000, 1.0000]) + + Example (multidim tensors): + >>> from torchmetrics.classification import MulticlassPrecision + >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> metric = MulticlassPrecision(num_classes=3, multidim_average='samplewise') + >>> metric(preds, target) + tensor([0.3889, 0.2778]) + >>> mcp = MulticlassPrecision(num_classes=3, multidim_average='samplewise', average=None) + >>> mcp(preds, target) + tensor([[0.6667, 0.0000, 0.5000], + [0.0000, 0.5000, 0.3333]]) + """ + is_differentiable: bool = False + higher_is_better: Optional[bool] = True + full_state_update: bool = False + + def compute(self) -> Tensor: + tp, fp, tn, fn = self._final_state() + return _precision_recall_reduce( + "precision", tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average + ) + + +class MultilabelPrecision(MultilabelStatScores): + r"""Computes `Precision`_ for multilabel tasks. + + .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}} + + Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and + false positives respecitively. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, C, ...)``. + If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and + will auto apply sigmoid per element. Addtionally, we convert to int tensor with thresholding using the value + in ``threshold``. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. + + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``mlp`` (:class:`~torch.Tensor`): The returned shape depends on the ``average`` and ``multidim_average`` + arguments: + + - If ``multidim_average`` is set to ``global``: + + - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor + - If ``average=None/'none'``, the shape will be ``(C,)`` + + - If ``multidim_average`` is set to ``samplewise``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)`` + - If ``average=None/'none'``, the shape will be ``(N, C)`` + + Args: + num_labels: Integer specifing the number of labels + threshold: Threshold for transforming probability to binary (0,1) predictions + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. 
+ + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Example (preds is int tensor): + >>> from torchmetrics.classification import MultilabelPrecision + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> metric = MultilabelPrecision(num_labels=3) + >>> metric(preds, target) + tensor(0.5000) + >>> mlp = MultilabelPrecision(num_labels=3, average=None) + >>> mlp(preds, target) + tensor([1.0000, 0.0000, 0.5000]) + + Example (preds is float tensor): + >>> from torchmetrics.classification import MultilabelPrecision + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> metric = MultilabelPrecision(num_labels=3) + >>> metric(preds, target) + tensor(0.5000) + >>> mlp = MultilabelPrecision(num_labels=3, average=None) + >>> mlp(preds, target) + tensor([1.0000, 0.0000, 0.5000]) + + Example (multidim tensors): + >>> from torchmetrics.classification import MultilabelPrecision + >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = torch.tensor( + ... [ + ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], + ... ] + ... ) + >>> metric = MultilabelPrecision(num_labels=3, multidim_average='samplewise') + >>> metric(preds, target) + tensor([0.3333, 0.0000]) + >>> mlp = MultilabelPrecision(num_labels=3, multidim_average='samplewise', average=None) + >>> mlp(preds, target) + tensor([[0.5000, 0.5000, 0.0000], + [0.0000, 0.0000, 0.0000]]) + """ + is_differentiable: bool = False + higher_is_better: Optional[bool] = True + full_state_update: bool = False + + def compute(self) -> Tensor: + tp, fp, tn, fn = self._final_state() + return _precision_recall_reduce( + "precision", tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average + ) + + +class BinaryRecall(BinaryStatScores): + r"""Computes `Recall`_ for binary tasks: + + .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}} + + Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and + false negatives respecitively. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, ...)``. If preds is a + floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply + sigmoid per element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` + + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``br`` (:class:`~torch.Tensor`): If ``multidim_average`` is set to ``global``, the metric returns a scalar + value. If ``multidim_average`` is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of + a scalar value per sample. + + Args: + threshold: Threshold for transforming probability to binary {0,1} predictions + multidim_average: + Defines how additionally dimensions ``...`` should be handled. 
Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Example (preds is int tensor): + >>> from torchmetrics.classification import BinaryRecall + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> metric = BinaryRecall() + >>> metric(preds, target) + tensor(0.6667) + + Example (preds is float tensor): + >>> from torchmetrics.classification import BinaryRecall + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> metric = BinaryRecall() + >>> metric(preds, target) + tensor(0.6667) + + Example (multidim tensors): + >>> from torchmetrics.classification import BinaryRecall + >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = torch.tensor( + ... [ + ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], + ... ] + ... ) + >>> metric = BinaryRecall(multidim_average='samplewise') + >>> metric(preds, target) + tensor([0.6667, 0.0000]) + """ + is_differentiable: bool = False + higher_is_better: Optional[bool] = True + full_state_update: bool = False + + def compute(self) -> Tensor: + tp, fp, tn, fn = self._final_state() + return _precision_recall_reduce( + "recall", tp, fp, tn, fn, average="binary", multidim_average=self.multidim_average + ) + + +class MulticlassRecall(MulticlassStatScores): + r"""Computes `Recall`_ for multiclass tasks: + + .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}} + + Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and + false negatives respecitively. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ..)`` + If preds is a floating point we apply ``torch.argmax`` along the ``C`` dimension to automatically convert + probabilities/logits into an int tensor. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` + + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``mcr`` (:class:`~torch.Tensor`): The returned shape depends on the ``average`` and ``multidim_average`` + arguments: + + - If ``multidim_average`` is set to ``global``: + + - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor + - If ``average=None/'none'``, the shape will be ``(C,)`` + + - If ``multidim_average`` is set to ``samplewise``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)`` + - If ``average=None/'none'``, the shape will be ``(N, C)`` + + Args: + num_classes: Integer specifing the number of classes + average: + Defines the reduction that is applied over labels. 
Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + top_k: + Number of highest probability or logit score predictions considered to find the correct label. + Only works when ``preds`` contain probabilities/logits. + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Example (preds is int tensor): + >>> from torchmetrics.classification import MulticlassRecall + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([2, 1, 0, 1]) + >>> metric = MulticlassRecall(num_classes=3) + >>> metric(preds, target) + tensor(0.8333) + >>> mcr = MulticlassRecall(num_classes=3, average=None) + >>> mcr(preds, target) + tensor([0.5000, 1.0000, 1.0000]) + + Example (preds is float tensor): + >>> from torchmetrics.classification import MulticlassRecall + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([ + ... [0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13], + ... ]) + >>> metric = MulticlassRecall(num_classes=3) + >>> metric(preds, target) + tensor(0.8333) + >>> mcr = MulticlassRecall(num_classes=3, average=None) + >>> mcr(preds, target) + tensor([0.5000, 1.0000, 1.0000]) + + Example (multidim tensors): + >>> from torchmetrics.classification import MulticlassRecall + >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> metric = MulticlassRecall(num_classes=3, multidim_average='samplewise') + >>> metric(preds, target) + tensor([0.5000, 0.2778]) + >>> mcr = MulticlassRecall(num_classes=3, multidim_average='samplewise', average=None) + >>> mcr(preds, target) + tensor([[1.0000, 0.0000, 0.5000], + [0.0000, 0.3333, 0.5000]]) + """ + is_differentiable: bool = False + higher_is_better: Optional[bool] = True + full_state_update: bool = False + + def compute(self) -> Tensor: + tp, fp, tn, fn = self._final_state() + return _precision_recall_reduce( + "recall", tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average + ) + + +class MultilabelRecall(MultilabelStatScores): + r"""Computes `Recall`_ for multilabel tasks: + + .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}} + + Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and + false negatives respecitively. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating + point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid + per element. 
Addtionally, we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)`` + + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``mlr`` (:class:`~torch.Tensor`): The returned shape depends on the ``average`` and ``multidim_average`` + arguments: + + - If ``multidim_average`` is set to ``global``: + + - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor + - If ``average=None/'none'``, the shape will be ``(C,)`` + + - If ``multidim_average`` is set to ``samplewise``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)`` + - If ``average=None/'none'``, the shape will be ``(N, C)`` + + Args: + num_labels: Integer specifing the number of labels + threshold: Threshold for transforming probability to binary (0,1) predictions + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Example (preds is int tensor): + >>> from torchmetrics.classification import MultilabelRecall + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> metric = MultilabelRecall(num_labels=3) + >>> metric(preds, target) + tensor(0.6667) + >>> mlr = MultilabelRecall(num_labels=3, average=None) + >>> mlr(preds, target) + tensor([1., 0., 1.]) + + Example (preds is float tensor): + >>> from torchmetrics.classification import MultilabelRecall + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> metric = MultilabelRecall(num_labels=3) + >>> metric(preds, target) + tensor(0.6667) + >>> mlr = MultilabelRecall(num_labels=3, average=None) + >>> mlr(preds, target) + tensor([1., 0., 1.]) + + Example (multidim tensors): + >>> from torchmetrics.classification import MultilabelRecall + >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = torch.tensor( + ... [ + ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], + ... ] + ... 
)
+        >>> metric = MultilabelRecall(num_labels=3, multidim_average='samplewise')
+        >>> metric(preds, target)
+        tensor([0.6667, 0.0000])
+        >>> mlr = MultilabelRecall(num_labels=3, multidim_average='samplewise', average=None)
+        >>> mlr(preds, target)
+        tensor([[1., 1., 0.],
+                [0., 0., 0.]])
+    """
+    is_differentiable: bool = False
+    higher_is_better: Optional[bool] = True
+    full_state_update: bool = False
+
+    def compute(self) -> Tensor:
+        tp, fp, tn, fn = self._final_state()
+        return _precision_recall_reduce(
+            "recall", tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average
+        )
+
+
+class Precision:
+    r"""Computes `Precision`_:
+
+    .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
+
+    Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
+    false positives respectively.
+
+    This class is a simple wrapper that dispatches to the task-specific versions of this metric, selected by setting
+    the ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
+    :class:`BinaryPrecision`, :class:`MulticlassPrecision` and :class:`MultilabelPrecision` for the specific details
+    of each argument's influence and examples.
+
+    Legacy Example:
+        >>> import torch
+        >>> preds = torch.tensor([2, 0, 2, 1])
+        >>> target = torch.tensor([1, 1, 2, 0])
+        >>> precision = Precision(task="multiclass", average='macro', num_classes=3)
+        >>> precision(preds, target)
+        tensor(0.1667)
+        >>> precision = Precision(task="multiclass", average='micro', num_classes=3)
+        >>> precision(preds, target)
+        tensor(0.2500)
+    """
+
+    def __new__(
+        cls,
+        task: Literal["binary", "multiclass", "multilabel"],
+        threshold: float = 0.5,
+        num_classes: Optional[int] = None,
+        num_labels: Optional[int] = None,
+        average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
+        multidim_average: Optional[Literal["global", "samplewise"]] = "global",
+        top_k: Optional[int] = 1,
+        ignore_index: Optional[int] = None,
+        validate_args: bool = True,
+        **kwargs: Any,
+    ) -> Metric:
+        assert multidim_average is not None
+        kwargs.update(dict(multidim_average=multidim_average, ignore_index=ignore_index, validate_args=validate_args))
+        if task == "binary":
+            return BinaryPrecision(threshold, **kwargs)
+        if task == "multiclass":
+            assert isinstance(num_classes, int)
+            assert isinstance(top_k, int)
+            return MulticlassPrecision(num_classes, top_k, average, **kwargs)
+        if task == "multilabel":
+            assert isinstance(num_labels, int)
+            return MultilabelPrecision(num_labels, threshold, average, **kwargs)
+        raise ValueError(
+            f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
+        )
+
+
+class Recall:
+    r"""Computes `Recall`_:
+
+    .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
+
+    Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
+    false negatives respectively.
+
+    This class is a simple wrapper that dispatches to the task-specific versions of this metric, selected by setting
+    the ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
+    :class:`BinaryRecall`, :class:`MulticlassRecall` and :class:`MultilabelRecall` for the specific details of
+    each argument's influence and examples.
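+
+    For instance, dispatching with ``task="binary"`` behaves like :class:`BinaryRecall`; a minimal sketch with
+    hypothetical values (one of the two positive targets is recovered):
+
+        >>> import torch
+        >>> target = torch.tensor([1, 1, 0, 0])
+        >>> preds = torch.tensor([1, 0, 0, 0])
+        >>> Recall(task="binary")(preds, target)
+        tensor(0.5000)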
+ + Legacy Example: + >>> import torch + >>> preds = torch.tensor([2, 0, 2, 1]) + >>> target = torch.tensor([1, 1, 2, 0]) + >>> recall = Recall(task="multiclass", average='macro', num_classes=3) + >>> recall(preds, target) + tensor(0.3333) + >>> recall = Recall(task="multiclass", average='micro', num_classes=3) + >>> recall(preds, target) + tensor(0.2500) + """ + + def __new__( + cls, + task: Literal["binary", "multiclass", "multilabel"], + threshold: float = 0.5, + num_classes: Optional[int] = None, + num_labels: Optional[int] = None, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro", + multidim_average: Optional[Literal["global", "samplewise"]] = "global", + top_k: Optional[int] = 1, + ignore_index: Optional[int] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> Metric: + assert multidim_average is not None + kwargs.update(dict(multidim_average=multidim_average, ignore_index=ignore_index, validate_args=validate_args)) + if task == "binary": + return BinaryRecall(threshold, **kwargs) + if task == "multiclass": + assert isinstance(num_classes, int) + assert isinstance(top_k, int) + return MulticlassRecall(num_classes, top_k, average, **kwargs) + if task == "multilabel": + assert isinstance(num_labels, int) + return MultilabelRecall(num_labels, threshold, average, **kwargs) + raise ValueError( + f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}" + ) diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/classification/precision_recall_curve.py b/wemm/lib/python3.10/site-packages/torchmetrics/classification/precision_recall_curve.py new file mode 100644 index 0000000000000000000000000000000000000000..983bce2d19ab3458c17a00c3ec113775bb28be21 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/classification/precision_recall_curve.py @@ -0,0 +1,489 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import Any, List, Optional, Tuple, Union
+
+import torch
+from torch import Tensor
+from typing_extensions import Literal
+
+from torchmetrics.functional.classification.precision_recall_curve import (
+    _adjust_threshold_arg,
+    _binary_precision_recall_curve_arg_validation,
+    _binary_precision_recall_curve_compute,
+    _binary_precision_recall_curve_format,
+    _binary_precision_recall_curve_tensor_validation,
+    _binary_precision_recall_curve_update,
+    _multiclass_precision_recall_curve_arg_validation,
+    _multiclass_precision_recall_curve_compute,
+    _multiclass_precision_recall_curve_format,
+    _multiclass_precision_recall_curve_tensor_validation,
+    _multiclass_precision_recall_curve_update,
+    _multilabel_precision_recall_curve_arg_validation,
+    _multilabel_precision_recall_curve_compute,
+    _multilabel_precision_recall_curve_format,
+    _multilabel_precision_recall_curve_tensor_validation,
+    _multilabel_precision_recall_curve_update,
+)
+from torchmetrics.metric import Metric
+from torchmetrics.utilities.data import dim_zero_cat
+
+
+class BinaryPrecisionRecallCurve(Metric):
+    r"""Computes the precision-recall curve for binary tasks. The curve consists of multiple pairs of precision and
+    recall values evaluated at different thresholds, such that the tradeoff between the two values can be seen.
+
+    As input to ``forward`` and ``update`` the metric accepts the following input:
+
+    - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``. Preds should be a tensor containing
+      probabilities or logits for each observation. If preds has values outside [0,1] range we consider the input
+      to be logits and will auto apply sigmoid per element.
+    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. Target should be a tensor containing
+      ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified). The
+      value 1 always encodes the positive class.
+
+    .. note::
+       Additional dimension ``...`` will be flattened into the batch dimension.
+
+    As output to ``forward`` and ``compute`` the metric returns the following output:
+
+    - ``precision`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_thresholds+1, )`` with precision values
+    - ``recall`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_thresholds+1, )`` with recall values
+    - ``thresholds`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_thresholds, )`` with increasing threshold
+      values
+
+    .. note::
+       The implementation supports calculating the metric in both a non-binned but accurate version and a binned
+       version that is less accurate but more memory efficient.
Setting the `thresholds` argument to `None` will activate the + non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds` + argument to either an integer, list or a 1d tensor will use a binned version that uses memory of + size :math:`\mathcal{O}(n_{thresholds})` (constant memory). + + Args: + thresholds: + Can be one of: + + - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from + all the data. Most accurate but also most memory consuming approach. + - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from + 0 to 1 as bins for the calculation. + - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation + - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as + bins for the calculation. + + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. + + Example: + >>> from torchmetrics.classification import BinaryPrecisionRecallCurve + >>> preds = torch.tensor([0, 0.5, 0.7, 0.8]) + >>> target = torch.tensor([0, 1, 1, 0]) + >>> bprc = BinaryPrecisionRecallCurve(thresholds=None) + >>> bprc(preds, target) # doctest: +NORMALIZE_WHITESPACE + (tensor([0.6667, 0.5000, 0.0000, 1.0000]), + tensor([1.0000, 0.5000, 0.0000, 0.0000]), + tensor([0.5000, 0.7000, 0.8000])) + >>> bprc = BinaryPrecisionRecallCurve(thresholds=5) + >>> bprc(preds, target) # doctest: +NORMALIZE_WHITESPACE + (tensor([0.5000, 0.6667, 0.6667, 0.0000, 0.0000, 1.0000]), + tensor([1., 1., 1., 0., 0., 0.]), + tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])) + """ + is_differentiable: bool = False + higher_is_better: Optional[bool] = None + full_state_update: bool = False + + def __init__( + self, + thresholds: Optional[Union[int, List[float], Tensor]] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> None: + super().__init__(**kwargs) + if validate_args: + _binary_precision_recall_curve_arg_validation(thresholds, ignore_index) + + self.ignore_index = ignore_index + self.validate_args = validate_args + + thresholds = _adjust_threshold_arg(thresholds) + if thresholds is None: + self.thresholds = thresholds + self.add_state("preds", default=[], dist_reduce_fx="cat") + self.add_state("target", default=[], dist_reduce_fx="cat") + else: + self.register_buffer("thresholds", thresholds) + self.add_state( + "confmat", default=torch.zeros(len(thresholds), 2, 2, dtype=torch.long), dist_reduce_fx="sum" + ) + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + if self.validate_args: + _binary_precision_recall_curve_tensor_validation(preds, target, self.ignore_index) + preds, target, _ = _binary_precision_recall_curve_format(preds, target, self.thresholds, self.ignore_index) + state = _binary_precision_recall_curve_update(preds, target, self.thresholds) + if isinstance(state, Tensor): + self.confmat += state + else: + self.preds.append(state[0]) + self.target.append(state[1]) + + def compute(self) -> Tuple[Tensor, Tensor, Tensor]: + if self.thresholds is None: + state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)] + else: + state = self.confmat + return _binary_precision_recall_curve_compute(state, self.thresholds) + + +class MulticlassPrecisionRecallCurve(Metric): + r"""Computes the 
precision-recall curve for multiclass tasks. The curve consists of multiple pairs of precision
+    and recall values evaluated at different thresholds, such that the tradeoff between the two values can be
+    seen.
+
+    As input to ``forward`` and ``update`` the metric accepts the following input:
+
+    - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor
+      containing probabilities or logits for each observation. If preds has values outside [0,1] range we consider
+      the input to be logits and will auto apply softmax per sample.
+    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. Target should be a tensor containing
+      ground truth labels, and therefore only contain values in the [0, n_classes-1] range (except if `ignore_index`
+      is specified).
+
+    .. note::
+       Additional dimension ``...`` will be flattened into the batch dimension.
+
+    As output to ``forward`` and ``compute`` the metric returns the following output:
+
+    - ``precision`` (:class:`~torch.Tensor` or :class:`~List`): if `thresholds=None` a list for each class is
+      returned with a 1d tensor of size ``(n_thresholds+1, )`` with precision values (length may differ between
+      classes). If `thresholds` is set to something else, then a single 2d tensor of size
+      ``(n_classes, n_thresholds+1)`` with precision values is returned.
+    - ``recall`` (:class:`~torch.Tensor` or :class:`~List`): if `thresholds=None` a list for each class is returned
+      with a 1d tensor of size ``(n_thresholds+1, )`` with recall values (length may differ between classes). If
+      `thresholds` is set to something else, then a single 2d tensor of size ``(n_classes, n_thresholds+1)`` with
+      recall values is returned.
+    - ``thresholds`` (:class:`~torch.Tensor` or :class:`~List`): if `thresholds=None` a list for each class is
+      returned with a 1d tensor of size ``(n_thresholds, )`` with increasing threshold values (length may differ
+      between classes). If `thresholds` is set to something else, then a single 1d tensor of size
+      ``(n_thresholds, )`` is returned with shared threshold values for all classes.
+
+    .. note::
+       The implementation supports calculating the metric in both a non-binned but accurate version and a binned
+       version that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will
+       activate the non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting
+       the `thresholds` argument to either an integer, list or a 1d tensor will use a binned version that uses
+       memory of size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
+
+    Args:
+        num_classes: Integer specifying the number of classes
+        thresholds:
+            Can be one of:
+
+            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
+              all the data. Most accurate but also most memory consuming approach.
+            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
+              0 to 1 as bins for the calculation.
+            - If set to a `list` of floats, will use the indicated thresholds in the list as bins for the
+              calculation.
+            - If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
+              bins for the calculation.
+
+        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+            Set to ``False`` for faster computations.
+        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
+
+    Example:
+        >>> from torchmetrics.classification import MulticlassPrecisionRecallCurve
+        >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+        ...                       [0.05, 0.75, 0.05, 0.05, 0.05],
+        ...                       [0.05, 0.05, 0.75, 0.05, 0.05],
+        ...                       [0.05, 0.05, 0.05, 0.75, 0.05]])
+        >>> target = torch.tensor([0, 1, 3, 2])
+        >>> mcprc = MulticlassPrecisionRecallCurve(num_classes=5, thresholds=None)
+        >>> precision, recall, thresholds = mcprc(preds, target)
+        >>> precision # doctest: +NORMALIZE_WHITESPACE
+        [tensor([1., 1.]), tensor([1., 1.]), tensor([0.2500, 0.0000, 1.0000]),
+         tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
+        >>> recall
+        [tensor([1., 0.]), tensor([1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
+        >>> thresholds
+        [tensor(0.7500), tensor(0.7500), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor(0.0500)]
+        >>> mcprc = MulticlassPrecisionRecallCurve(num_classes=5, thresholds=5)
+        >>> mcprc(preds, target) # doctest: +NORMALIZE_WHITESPACE
+        (tensor([[0.2500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000],
+                 [0.2500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000],
+                 [0.2500, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000],
+                 [0.2500, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000],
+                 [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000]]),
+         tensor([[1., 1., 1., 1., 0., 0.],
+                 [1., 1., 1., 1., 0., 0.],
+                 [1., 0., 0., 0., 0., 0.],
+                 [1., 0., 0., 0., 0., 0.],
+                 [0., 0., 0., 0., 0., 0.]]),
+         tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
+    """
+    is_differentiable: bool = False
+    higher_is_better: Optional[bool] = None
+    full_state_update: bool = False
+
+    def __init__(
+        self,
+        num_classes: int,
+        thresholds: Optional[Union[int, List[float], Tensor]] = None,
+        ignore_index: Optional[int] = None,
+        validate_args: bool = True,
+        **kwargs: Any,
+    ) -> None:
+        super().__init__(**kwargs)
+        if validate_args:
+            _multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index)
+
+        self.num_classes = num_classes
+        self.ignore_index = ignore_index
+        self.validate_args = validate_args
+
+        thresholds = _adjust_threshold_arg(thresholds)
+        if thresholds is None:
+            # non-binned: keep all predictions and targets and compute the exact curve in ``compute``
+            self.thresholds = thresholds
+            self.add_state("preds", default=[], dist_reduce_fx="cat")
+            self.add_state("target", default=[], dist_reduce_fx="cat")
+        else:
+            # binned: accumulate one confusion matrix per threshold (constant memory)
+            self.register_buffer("thresholds", thresholds)
+            self.add_state(
+                "confmat",
+                default=torch.zeros(len(thresholds), num_classes, 2, 2, dtype=torch.long),
+                dist_reduce_fx="sum",
+            )
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        if self.validate_args:
+            _multiclass_precision_recall_curve_tensor_validation(preds, target, self.num_classes, self.ignore_index)
+        preds, target, _ = _multiclass_precision_recall_curve_format(
+            preds, target, self.num_classes, self.thresholds, self.ignore_index
+        )
+        state = _multiclass_precision_recall_curve_update(preds, target, self.num_classes, self.thresholds)
+        if isinstance(state, Tensor):
+            self.confmat += state
+        else:
+            self.preds.append(state[0])
+            self.target.append(state[1])
+
+    def compute(self) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
+        if self.thresholds is None:
+            state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)]
+        else:
+            state = self.confmat
+        return _multiclass_precision_recall_curve_compute(state, self.num_classes, self.thresholds)
+
+
+class MultilabelPrecisionRecallCurve(Metric):
+    r"""Computes the precision-recall curve for multilabel tasks. The curve consists of multiple pairs of precision
+    and recall values evaluated at different thresholds, such that the tradeoff between the two values can be seen.
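+
+    Each threshold contributes one (precision, recall) point per label; a minimal sketch of that relationship for
+    a single label at a single hypothetical threshold of 0.5:
+
+        >>> import torch
+        >>> scores = torch.tensor([0.2, 0.8, 0.6, 0.4])
+        >>> target = torch.tensor([0, 1, 0, 1])
+        >>> binarized = (scores >= 0.5).int()
+        >>> tp = ((binarized == 1) & (target == 1)).sum()
+        >>> fp = ((binarized == 1) & (target == 0)).sum()
+        >>> fn = ((binarized == 0) & (target == 1)).sum()
+        >>> tp / (tp + fp), tp / (tp + fn)
+        (tensor(0.5000), tensor(0.5000))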
+
+    As input to ``forward`` and ``update`` the metric accepts the following input:
+
+    - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor
+      containing probabilities or logits for each observation. If preds has values outside [0,1] range we consider
+      the input to be logits and will auto apply sigmoid per element.
+    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. Target should be a tensor
+      containing ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is
+      specified).
+
+    .. note::
+       Additional dimension ``...`` will be flattened into the batch dimension.
+
+    As output to ``forward`` and ``compute`` the metric returns a tuple of either 3 tensors or 3 lists containing:
+
+    - ``precision`` (:class:`~torch.Tensor` or :class:`~List`): if `thresholds=None` a list for each label is
+      returned with a 1d tensor of size ``(n_thresholds+1, )`` with precision values (length may differ between
+      labels). If `thresholds` is set to something else, then a single 2d tensor of size
+      ``(n_labels, n_thresholds+1)`` with precision values is returned.
+    - ``recall`` (:class:`~torch.Tensor` or :class:`~List`): if `thresholds=None` a list for each label is returned
+      with a 1d tensor of size ``(n_thresholds+1, )`` with recall values (length may differ between labels). If
+      `thresholds` is set to something else, then a single 2d tensor of size ``(n_labels, n_thresholds+1)`` with
+      recall values is returned.
+    - ``thresholds`` (:class:`~torch.Tensor` or :class:`~List`): if `thresholds=None` a list for each label is
+      returned with a 1d tensor of size ``(n_thresholds, )`` with increasing threshold values (length may differ
+      between labels). If `thresholds` is set to something else, then a single 1d tensor of size
+      ``(n_thresholds, )`` is returned with shared threshold values for all labels.
+
+    .. note::
+       The implementation supports calculating the metric in both a non-binned but accurate version and a binned
+       version that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will
+       activate the non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting
+       the `thresholds` argument to either an integer, list or a 1d tensor will use a binned version that uses
+       memory of size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).
+
+    Args:
+        num_labels: Integer specifying the number of labels
+        thresholds:
+            Can be one of:
+
+            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
+              all the data. Most accurate but also most memory consuming approach.
+            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
+              0 to 1 as bins for the calculation.
+            - If set to a `list` of floats, will use the indicated thresholds in the list as bins for the
+              calculation.
+            - If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
+              bins for the calculation.
+
+        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+            Set to ``False`` for faster computations.
+        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
+
+    Example:
+        >>> from torchmetrics.classification import MultilabelPrecisionRecallCurve
+        >>> preds = torch.tensor([[0.75, 0.05, 0.35],
+        ...                       [0.45, 0.75, 0.05],
+        ...                       [0.05, 0.55, 0.75],
+        ...                       [0.05, 0.65, 0.05]])
+        >>> target = torch.tensor([[1, 0, 1],
+        ...                        [0, 0, 0],
+        ...                        [0, 1, 1],
+        ...                        [1, 1, 1]])
+        >>> mlprc = MultilabelPrecisionRecallCurve(num_labels=3, thresholds=None)
+        >>> precision, recall, thresholds = mlprc(preds, target)
+        >>> precision # doctest: +NORMALIZE_WHITESPACE
+        [tensor([0.5000, 0.5000, 1.0000, 1.0000]), tensor([0.6667, 0.5000, 0.0000, 1.0000]),
+         tensor([0.7500, 1.0000, 1.0000, 1.0000])]
+        >>> recall # doctest: +NORMALIZE_WHITESPACE
+        [tensor([1.0000, 0.5000, 0.5000, 0.0000]), tensor([1.0000, 0.5000, 0.0000, 0.0000]),
+         tensor([1.0000, 0.6667, 0.3333, 0.0000])]
+        >>> thresholds # doctest: +NORMALIZE_WHITESPACE
+        [tensor([0.0500, 0.4500, 0.7500]), tensor([0.5500, 0.6500, 0.7500]),
+         tensor([0.0500, 0.3500, 0.7500])]
+        >>> mlprc = MultilabelPrecisionRecallCurve(num_labels=3, thresholds=5)
+        >>> mlprc(preds, target) # doctest: +NORMALIZE_WHITESPACE
+        (tensor([[0.5000, 0.5000, 1.0000, 1.0000, 0.0000, 1.0000],
+                 [0.5000, 0.6667, 0.6667, 0.0000, 0.0000, 1.0000],
+                 [0.7500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000]]),
+         tensor([[1.0000, 0.5000, 0.5000, 0.5000, 0.0000, 0.0000],
+                 [1.0000, 1.0000, 1.0000, 0.0000, 0.0000, 0.0000],
+                 [1.0000, 0.6667, 0.3333, 0.3333, 0.0000, 0.0000]]),
+         tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
+    """
+    is_differentiable: bool = False
+    higher_is_better: Optional[bool] = None
+    full_state_update: bool = False
+
+    def __init__(
+        self,
+        num_labels: int,
+        thresholds: Optional[Union[int, List[float], Tensor]] = None,
+        ignore_index: Optional[int] = None,
+        validate_args: bool = True,
+        **kwargs: Any,
+    ) -> None:
+        super().__init__(**kwargs)
+        if validate_args:
+            _multilabel_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index)
+
+        self.num_labels = num_labels
+        self.ignore_index = ignore_index
+        self.validate_args = validate_args
+
+        thresholds = _adjust_threshold_arg(thresholds)
+        if thresholds is None:
+            self.thresholds = thresholds
+            self.add_state("preds", default=[], dist_reduce_fx="cat")
+            self.add_state("target", default=[], dist_reduce_fx="cat")
+        else:
+            self.register_buffer("thresholds", thresholds)
+            self.add_state(
+                "confmat",
+                default=torch.zeros(len(thresholds), num_labels, 2, 2, dtype=torch.long),
+                dist_reduce_fx="sum",
+            )
+
+    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
+        if self.validate_args:
+            _multilabel_precision_recall_curve_tensor_validation(preds, target, self.num_labels, self.ignore_index)
+        preds, target, _ = _multilabel_precision_recall_curve_format(
+            preds, target, self.num_labels, self.thresholds, self.ignore_index
+        )
+        state = _multilabel_precision_recall_curve_update(preds, target, self.num_labels, self.thresholds)
+        if isinstance(state, Tensor):
+            self.confmat += state
+        else:
+            self.preds.append(state[0])
+            self.target.append(state[1])
+
+    def compute(self) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
+        if self.thresholds is None:
+            state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)]
+        else:
+            state = self.confmat
+        return _multilabel_precision_recall_curve_compute(state, self.num_labels, self.thresholds, self.ignore_index)
+
+
+class PrecisionRecallCurve:
+    r"""Computes the precision-recall curve. The curve consists of multiple pairs of precision and recall values
+    evaluated at different thresholds, such that the tradeoff between the two values can be seen.
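+
+    A common downstream use is picking an operating threshold from the returned curve, e.g. by maximising F1; a
+    minimal sketch reusing the binary curve values from the Legacy Example below:
+
+        >>> import torch
+        >>> precision = torch.tensor([0.6667, 0.5000, 1.0000, 1.0000])
+        >>> recall = torch.tensor([1.0000, 0.5000, 0.5000, 0.0000])
+        >>> thresholds = torch.tensor([0.1000, 0.4000, 0.8000])
+        >>> f1 = 2 * precision * recall / (precision + recall + 1e-8)
+        >>> thresholds[f1[:-1].argmax()]  # the last curve point has no threshold attached
+        tensor(0.1000)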
+
+    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
+    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
+    :class:`BinaryPrecisionRecallCurve`, :class:`MulticlassPrecisionRecallCurve` and
+    :class:`MultilabelPrecisionRecallCurve` for the specific details of each argument's influence and examples.
+
+    Legacy Example:
+        >>> pred = torch.tensor([0, 0.1, 0.8, 0.4])
+        >>> target = torch.tensor([0, 1, 1, 0])
+        >>> pr_curve = PrecisionRecallCurve(task="binary")
+        >>> precision, recall, thresholds = pr_curve(pred, target)
+        >>> precision
+        tensor([0.6667, 0.5000, 1.0000, 1.0000])
+        >>> recall
+        tensor([1.0000, 0.5000, 0.5000, 0.0000])
+        >>> thresholds
+        tensor([0.1000, 0.4000, 0.8000])
+
+        >>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+        ...                      [0.05, 0.75, 0.05, 0.05, 0.05],
+        ...                      [0.05, 0.05, 0.75, 0.05, 0.05],
+        ...                      [0.05, 0.05, 0.05, 0.75, 0.05]])
+        >>> target = torch.tensor([0, 1, 3, 2])
+        >>> pr_curve = PrecisionRecallCurve(task="multiclass", num_classes=5)
+        >>> precision, recall, thresholds = pr_curve(pred, target)
+        >>> precision
+        [tensor([1., 1.]), tensor([1., 1.]), tensor([0.2500, 0.0000, 1.0000]),
+         tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
+        >>> recall
+        [tensor([1., 0.]), tensor([1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
+        >>> thresholds
+        [tensor(0.7500), tensor(0.7500), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor(0.0500)]
+    """
+
+    def __new__(
+        cls,
+        task: Literal["binary", "multiclass", "multilabel"],
+        thresholds: Optional[Union[int, List[float], Tensor]] = None,
+        num_classes: Optional[int] = None,
+        num_labels: Optional[int] = None,
+        ignore_index: Optional[int] = None,
+        validate_args: bool = True,
+        **kwargs: Any,
+    ) -> Metric:
+        kwargs.update(dict(thresholds=thresholds, ignore_index=ignore_index, validate_args=validate_args))
+        if task == "binary":
+            return BinaryPrecisionRecallCurve(**kwargs)
+        if task == "multiclass":
+            assert isinstance(num_classes, int)
+            return MulticlassPrecisionRecallCurve(num_classes, **kwargs)
+        if task == "multilabel":
+            assert isinstance(num_labels, int)
+            return MultilabelPrecisionRecallCurve(num_labels, **kwargs)
+        raise ValueError(
+            f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
+        )
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/classification/ranking.py b/wemm/lib/python3.10/site-packages/torchmetrics/classification/ranking.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e8431d3986606cbc61ceb04b775d1969004dd67
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/classification/ranking.py
@@ -0,0 +1,242 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
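+
+# Editorial sketch (not part of the library): the ranking metrics below follow the
+# standard multilabel ranking definitions. Assuming the usual definition of coverage
+# error (average depth of the score ranking needed to cover every true label), it can
+# be reproduced by hand like this:
+#
+#     import torch
+#
+#     preds = torch.tensor([[0.9, 0.2, 0.4], [0.1, 0.8, 0.3]])
+#     target = torch.tensor([[1, 0, 1], [0, 1, 1]])
+#     # rank of each label when scores are sorted descending (1 = best score)
+#     ranks = preds.argsort(dim=1, descending=True).argsort(dim=1) + 1
+#     # per sample: how deep must we go down the ranking to cover all true labels?
+#     depth = torch.where(target == 1, ranks, torch.zeros_like(ranks)).max(dim=1).values
+#     depth.float().mean()  # tensor(2.) for this toy batch
+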
+from typing import Any, Optional + +import torch +from torch import Tensor + +from torchmetrics.functional.classification.ranking import ( + _multilabel_confusion_matrix_arg_validation, + _multilabel_confusion_matrix_format, + _multilabel_coverage_error_update, + _multilabel_ranking_average_precision_update, + _multilabel_ranking_loss_update, + _multilabel_ranking_tensor_validation, + _ranking_reduce, +) +from torchmetrics.metric import Metric + + +class MultilabelCoverageError(Metric): + """Computes `Multilabel coverage error`_. The score measure how far we need to go through the ranked scores to + cover all true labels. The best value is equal to the average number of labels in the target tensor per sample. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor + containing probabilities or logits for each observation. If preds has values outside [0,1] range we consider + the input to be logits and will auto apply sigmoid per element. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. Target should be a tensor + containing ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified). + + .. note:: + Additional dimension ``...`` will be flattened into the batch dimension. + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``mlce`` (:class:`~torch.Tensor`): A tensor containing the multilabel coverage error. + + Args: + num_labels: Integer specifing the number of labels + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Example: + >>> from torchmetrics.classification import MultilabelCoverageError + >>> _ = torch.manual_seed(42) + >>> preds = torch.rand(10, 5) + >>> target = torch.randint(2, (10, 5)) + >>> mlce = MultilabelCoverageError(num_labels=5) + >>> mlce(preds, target) + tensor(3.9000) + """ + + higher_is_better: bool = False + is_differentiable: bool = False + full_state_update: bool = False + + def __init__( + self, + num_labels: int, + ignore_index: Optional[int] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> None: + super().__init__(**kwargs) + if validate_args: + _multilabel_confusion_matrix_arg_validation(num_labels, threshold=0.0, ignore_index=ignore_index) + self.validate_args = validate_args + self.num_labels = num_labels + self.ignore_index = ignore_index + self.add_state("measure", torch.tensor(0.0), dist_reduce_fx="sum") + self.add_state("total", torch.tensor(0.0), dist_reduce_fx="sum") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + if self.validate_args: + _multilabel_ranking_tensor_validation(preds, target, self.num_labels, self.ignore_index) + preds, target = _multilabel_confusion_matrix_format( + preds, target, self.num_labels, threshold=0.0, ignore_index=self.ignore_index, should_threshold=False + ) + measure, n_elements = _multilabel_coverage_error_update(preds, target) + self.measure += measure + self.total += n_elements + + def compute(self) -> Tensor: + return _ranking_reduce(self.measure, self.total) + + +class MultilabelRankingAveragePrecision(Metric): + """Computes label ranking average precision score for multilabel data [1]. 
The score is the average over each + ground truth label assigned to each sample of the ratio of true vs. total labels with lower score. Best score + is 1. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor + containing probabilities or logits for each observation. If preds has values outside [0,1] range we consider + the input to be logits and will auto apply sigmoid per element. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. Target should be a tensor + containing ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified). + + .. note:: + Additional dimension ``...`` will be flattened into the batch dimension. + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``mlrap`` (:class:`~torch.Tensor`): A tensor containing the multilabel ranking average precision. + + Args: + num_labels: Integer specifing the number of labels + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Example: + >>> from torchmetrics.classification import MultilabelRankingAveragePrecision + >>> _ = torch.manual_seed(42) + >>> preds = torch.rand(10, 5) + >>> target = torch.randint(2, (10, 5)) + >>> mlrap = MultilabelRankingAveragePrecision(num_labels=5) + >>> mlrap(preds, target) + tensor(0.7744) + """ + + higher_is_better: bool = True + is_differentiable: bool = False + full_state_update: bool = False + + def __init__( + self, + num_labels: int, + ignore_index: Optional[int] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> None: + super().__init__(**kwargs) + if validate_args: + _multilabel_confusion_matrix_arg_validation(num_labels, threshold=0.0, ignore_index=ignore_index) + self.validate_args = validate_args + self.num_labels = num_labels + self.ignore_index = ignore_index + self.add_state("measure", torch.tensor(0.0), dist_reduce_fx="sum") + self.add_state("total", torch.tensor(0.0), dist_reduce_fx="sum") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + if self.validate_args: + _multilabel_ranking_tensor_validation(preds, target, self.num_labels, self.ignore_index) + preds, target = _multilabel_confusion_matrix_format( + preds, target, self.num_labels, threshold=0.0, ignore_index=self.ignore_index, should_threshold=False + ) + measure, n_elements = _multilabel_ranking_average_precision_update(preds, target) + self.measure += measure + self.total += n_elements + + def compute(self) -> Tensor: + return _ranking_reduce(self.measure, self.total) + + +class MultilabelRankingLoss(Metric): + """Computes the label ranking loss for multilabel data [1]. The score is corresponds to the average number of + label pairs that are incorrectly ordered given some predictions weighted by the size of the label set and the + number of labels not in the label set. The best score is 0. + + As input to ``forward`` and ``update`` the metric accepts the following input: + + - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor + containing probabilities or logits for each observation. 
If preds has values outside [0,1] range we consider + the input to be logits and will auto apply sigmoid per element. + - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. Target should be a tensor + containing ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified). + + .. note:: + Additional dimension ``...`` will be flattened into the batch dimension. + + As output to ``forward`` and ``compute`` the metric returns the following output: + + - ``mlrl`` (:class:`~torch.Tensor`): A tensor containing the multilabel ranking loss. + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_labels: Integer specifing the number of labels + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Example: + >>> from torchmetrics.classification import MultilabelRankingLoss + >>> _ = torch.manual_seed(42) + >>> preds = torch.rand(10, 5) + >>> target = torch.randint(2, (10, 5)) + >>> mlrl = MultilabelRankingLoss(num_labels=5) + >>> mlrl(preds, target) + tensor(0.4167) + """ + + higher_is_better: bool = False + is_differentiable: bool = False + full_state_update: bool = False + + def __init__( + self, + num_labels: int, + ignore_index: Optional[int] = None, + validate_args: bool = True, + **kwargs: Any, + ) -> None: + super().__init__(**kwargs) + if validate_args: + _multilabel_confusion_matrix_arg_validation(num_labels, threshold=0.0, ignore_index=ignore_index) + self.validate_args = validate_args + self.num_labels = num_labels + self.ignore_index = ignore_index + self.add_state("measure", torch.tensor(0.0), dist_reduce_fx="sum") + self.add_state("total", torch.tensor(0.0), dist_reduce_fx="sum") + + def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore + if self.validate_args: + _multilabel_ranking_tensor_validation(preds, target, self.num_labels, self.ignore_index) + preds, target = _multilabel_confusion_matrix_format( + preds, target, self.num_labels, threshold=0.0, ignore_index=self.ignore_index, should_threshold=False + ) + measure, n_elements = _multilabel_ranking_loss_update(preds, target) + self.measure += measure + self.total += n_elements + + def compute(self) -> Tensor: + return _ranking_reduce(self.measure, self.total) diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/collections.py b/wemm/lib/python3.10/site-packages/torchmetrics/collections.py new file mode 100644 index 0000000000000000000000000000000000000000..5554dbaa13a74da4303e479de90849e9def15e6b --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/collections.py @@ -0,0 +1,483 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
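+
+# Editorial sketch (not part of the library): the central idea in this module is the
+# "compute group" -- metrics whose internal states are detected to be identical after
+# an update share that state by reference instead of storing it twice. Roughly:
+#
+#     import torch
+#     from torchmetrics import MetricCollection
+#     from torchmetrics.classification import MulticlassPrecision, MulticlassRecall
+#
+#     mc = MetricCollection([MulticlassPrecision(num_classes=3), MulticlassRecall(num_classes=3)])
+#     mc.update(torch.tensor([0, 1, 2]), torch.tensor([0, 2, 1]))
+#     # both metrics reduce the same stat-scores state, so they are expected to merge:
+#     mc.compute_groups  # e.g. {0: ['MulticlassPrecision', 'MulticlassRecall']}
+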
+# this is just a bypass for this module name collision with the built-in one
+from collections import OrderedDict
+from copy import deepcopy
+from typing import Any, Dict, Hashable, Iterable, List, Optional, Sequence, Tuple, Union
+
+import torch
+from torch import Tensor
+from torch.nn import Module, ModuleDict
+
+from torchmetrics.metric import Metric
+from torchmetrics.utilities import rank_zero_warn
+from torchmetrics.utilities.data import _flatten_dict, allclose
+
+
+class MetricCollection(ModuleDict):
+    """MetricCollection class can be used to chain metrics that have the same call pattern into one single class.
+
+    Args:
+        metrics: One of the following
+
+            * list or tuple (sequence): if metrics are passed in as a list or tuple, will use the metrics class name
+              as key for output dict. Therefore, two metrics of the same class cannot be chained this way.
+
+            * arguments: similar to passing in as a list, metrics passed in as arguments will use their metric
+              class name as key for the output dict.
+
+            * dict: if metrics are passed in as a dict, will use each key in the dict as key for output dict.
+              Use this format if you want to chain together multiple of the same metric with different parameters.
+              Note that the keys in the output dict will be sorted alphabetically.
+
+        prefix: a string to append in front of the keys of the output dict
+
+        postfix: a string to append after the keys of the output dict
+
+        compute_groups:
+            By default the MetricCollection will try to reduce the computations needed for the metrics in the
+            collection by checking if they belong to the same **compute group**. All metrics in a compute group share
+            the same metric state and therefore only differ in their compute step, e.g. accuracy, precision and recall
+            can all be computed from the true positives/negatives and false positives/negatives. By default,
+            this argument is ``True`` which enables this feature. Set this argument to `False` for disabling
+            this behaviour. Can also be set to a list of lists of metrics for setting the compute groups yourself.
+
+    .. note::
+        The compute groups feature can significantly speed up the calculation of metrics under the right conditions.
+        First, the feature is only available when calling the ``update`` method and not when calling the ``forward``
+        method, due to the internal logic of ``forward`` preventing this. Secondly, since compute groups share metric
+        states by reference, calling ``.items()``, ``.values()`` etc. on the metric collection will break this
+        reference and a copy of the states is instead returned in this case (the reference will be reestablished on
+        the next call to ``update``).
+
+    .. note::
+        Metric collections can be nested at initialization (see last example) but the output of the collection will
+        still be a single flattened dictionary combining the prefix and postfix arguments from the nested collection.
+
+    Raises:
+        ValueError:
+            If one of the elements of ``metrics`` is not an instance of ``torchmetrics.Metric``.
+        ValueError:
+            If two elements in ``metrics`` have the same ``name``.
+        ValueError:
+            If ``metrics`` is not a ``list``, ``tuple`` or a ``dict``.
+        ValueError:
+            If ``metrics`` is ``dict`` and additional_metrics are passed in.
+        ValueError:
+            If ``prefix`` is set and it is not a string.
+        ValueError:
+            If ``postfix`` is set and it is not a string.
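+
+    A small additional sketch (illustrative; it uses only the ``clone`` method defined further down) of how an
+    existing collection can be re-labelled without re-instantiating its metrics::
+
+        # "metrics" is any MetricCollection, e.g. one from the examples below
+        train_metrics = metrics.clone(prefix="train_")
+        val_metrics = metrics.clone(prefix="val_")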
+ + Example (input as list): + >>> import torch + >>> from pprint import pprint + >>> from torchmetrics import MetricCollection, MeanSquaredError + >>> from torchmetrics.classification import MulticlassAccuracy, MulticlassPrecision, MulticlassRecall + >>> target = torch.tensor([0, 2, 0, 2, 0, 1, 0, 2]) + >>> preds = torch.tensor([2, 1, 2, 0, 1, 2, 2, 2]) + >>> metrics = MetricCollection([MulticlassAccuracy(num_classes=3, average='micro'), + ... MulticlassPrecision(num_classes=3, average='macro'), + ... MulticlassRecall(num_classes=3, average='macro')]) + >>> metrics(preds, target) # doctest: +NORMALIZE_WHITESPACE + {'MulticlassAccuracy': tensor(0.1250), + 'MulticlassPrecision': tensor(0.0667), + 'MulticlassRecall': tensor(0.1111)} + + Example (input as arguments): + >>> metrics = MetricCollection(MulticlassAccuracy(num_classes=3, average='micro'), + ... MulticlassPrecision(num_classes=3, average='macro'), + ... MulticlassRecall(num_classes=3, average='macro')) + >>> metrics(preds, target) # doctest: +NORMALIZE_WHITESPACE + {'MulticlassAccuracy': tensor(0.1250), + 'MulticlassPrecision': tensor(0.0667), + 'MulticlassRecall': tensor(0.1111)} + + Example (input as dict): + >>> metrics = MetricCollection({'micro_recall': MulticlassRecall(num_classes=3, average='micro'), + ... 'macro_recall': MulticlassRecall(num_classes=3, average='macro')}) + >>> same_metric = metrics.clone() + >>> pprint(metrics(preds, target)) + {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)} + >>> pprint(same_metric(preds, target)) + {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)} + + Example (specification of compute groups): + >>> metrics = MetricCollection( + ... MulticlassRecall(num_classes=3, average='macro'), + ... MulticlassPrecision(num_classes=3, average='macro'), + ... MeanSquaredError(), + ... compute_groups=[['MulticlassRecall', 'MulticlassPrecision'], ['MeanSquaredError']] + ... ) + >>> metrics.update(preds, target) + >>> pprint(metrics.compute()) + {'MeanSquaredError': tensor(2.3750), 'MulticlassPrecision': tensor(0.0667), 'MulticlassRecall': tensor(0.1111)} + >>> pprint(metrics.compute_groups) + {0: ['MulticlassRecall', 'MulticlassPrecision'], 1: ['MeanSquaredError']} + + Example (nested metric collections): + >>> metrics = MetricCollection([ + ... MetricCollection([ + ... MulticlassAccuracy(num_classes=3, average='macro'), + ... MulticlassPrecision(num_classes=3, average='macro') + ... ], postfix='_macro'), + ... MetricCollection([ + ... MulticlassAccuracy(num_classes=3, average='micro'), + ... MulticlassPrecision(num_classes=3, average='micro') + ... ], postfix='_micro'), + ... 
], prefix='valmetrics/') + >>> pprint(metrics(preds, target)) # doctest: +NORMALIZE_WHITESPACE + {'valmetrics/MulticlassAccuracy_macro': tensor(0.1111), + 'valmetrics/MulticlassAccuracy_micro': tensor(0.1250), + 'valmetrics/MulticlassPrecision_macro': tensor(0.0667), + 'valmetrics/MulticlassPrecision_micro': tensor(0.1250)} + """ + + _groups: Dict[int, List[str]] + + def __init__( + self, + metrics: Union[Metric, Sequence[Metric], Dict[str, Metric]], + *additional_metrics: Metric, + prefix: Optional[str] = None, + postfix: Optional[str] = None, + compute_groups: Union[bool, List[List[str]]] = True, + ) -> None: + super().__init__() + + self.prefix = self._check_arg(prefix, "prefix") + self.postfix = self._check_arg(postfix, "postfix") + self._enable_compute_groups = compute_groups + self._groups_checked: bool = False + self._state_is_copy: bool = False + + self.add_metrics(metrics, *additional_metrics) + + @torch.jit.unused + def forward(self, *args: Any, **kwargs: Any) -> Dict[str, Any]: + """Iteratively call forward for each metric. + + Positional arguments (args) will be passed to every metric in the collection, while keyword arguments (kwargs) + will be filtered based on the signature of the individual metric. + """ + res = {k: m(*args, **m._filter_kwargs(**kwargs)) for k, m in self.items(keep_base=True, copy_state=False)} + res = _flatten_dict(res) + return {self._set_name(k): v for k, v in res.items()} + + def update(self, *args: Any, **kwargs: Any) -> None: + """Iteratively call update for each metric. + + Positional arguments (args) will be passed to every metric in the collection, while keyword arguments (kwargs) + will be filtered based on the signature of the individual metric. + """ + # Use compute groups if already initialized and checked + if self._groups_checked: + for _, cg in self._groups.items(): + # only update the first member + m0 = getattr(self, cg[0]) + m0.update(*args, **m0._filter_kwargs(**kwargs)) + if self._state_is_copy: + # If we have deep copied state inbetween updates, reestablish link + self._compute_groups_create_state_ref() + self._state_is_copy = False + else: # the first update always do per metric to form compute groups + for _, m in self.items(keep_base=True, copy_state=False): + m_kwargs = m._filter_kwargs(**kwargs) + m.update(*args, **m_kwargs) + + if self._enable_compute_groups: + self._merge_compute_groups() + # create reference between states + self._compute_groups_create_state_ref() + self._groups_checked = True + + def _merge_compute_groups(self) -> None: + """Iterates over the collection of metrics, checking if the state of each metric matches another. + + If so, their compute groups will be merged into one. The complexity of the method is approximately + ``O(number_of_metrics_in_collection ** 2)``, as all metrics need to be compared to all other metrics. 
+ """ + n_groups = len(self._groups) + while True: + for cg_idx1, cg_members1 in deepcopy(self._groups).items(): + for cg_idx2, cg_members2 in deepcopy(self._groups).items(): + if cg_idx1 == cg_idx2: + continue + + metric1 = getattr(self, cg_members1[0]) + metric2 = getattr(self, cg_members2[0]) + + if self._equal_metric_states(metric1, metric2): + self._groups[cg_idx1].extend(self._groups.pop(cg_idx2)) + break + + # Start over if we merged groups + if len(self._groups) != n_groups: + break + + # Stop when we iterate over everything and do not merge any groups + if len(self._groups) == n_groups: + break + else: + n_groups = len(self._groups) + + # Re-index groups + temp = deepcopy(self._groups) + self._groups = {} + for idx, values in enumerate(temp.values()): + self._groups[idx] = values + + @staticmethod + def _equal_metric_states(metric1: Metric, metric2: Metric) -> bool: + """Check if the metric state of two metrics are the same.""" + # empty state + if len(metric1._defaults) == 0 or len(metric2._defaults) == 0: + return False + + if metric1._defaults.keys() != metric2._defaults.keys(): + return False + + for key in metric1._defaults.keys(): + state1 = getattr(metric1, key) + state2 = getattr(metric2, key) + + if type(state1) != type(state2): + return False + + if isinstance(state1, Tensor) and isinstance(state2, Tensor): + return state1.shape == state2.shape and allclose(state1, state2) + + if isinstance(state1, list) and isinstance(state2, list): + return all(s1.shape == s2.shape and allclose(s1, s2) for s1, s2 in zip(state1, state2)) + + return True + + def _compute_groups_create_state_ref(self, copy: bool = False) -> None: + """Create reference between metrics in the same compute group. + + Args: + copy: If `True` the metric state will between members will be copied instead + of just passed by reference + """ + if not self._state_is_copy: + for _, cg in self._groups.items(): + m0 = getattr(self, cg[0]) + for i in range(1, len(cg)): + mi = getattr(self, cg[i]) + for state in m0._defaults: + m0_state = getattr(m0, state) + # Determine if we just should set a reference or a full copy + setattr(mi, state, deepcopy(m0_state) if copy else m0_state) + setattr(mi, "_update_count", deepcopy(m0._update_count) if copy else m0._update_count) + self._state_is_copy = copy + + def compute(self) -> Dict[str, Any]: + """Compute the result for each metric in the collection.""" + res = {k: m.compute() for k, m in self.items(keep_base=True, copy_state=False)} + res = _flatten_dict(res) + return {self._set_name(k): v for k, v in res.items()} + + def reset(self) -> None: + """Iteratively call reset for each metric.""" + for _, m in self.items(keep_base=True, copy_state=False): + m.reset() + if self._enable_compute_groups and self._groups_checked: + # reset state reference + self._compute_groups_create_state_ref() + + def clone(self, prefix: Optional[str] = None, postfix: Optional[str] = None) -> "MetricCollection": + """Make a copy of the metric collection + Args: + prefix: a string to append in front of the metric keys + postfix: a string to append after the keys of the output dict + + """ + mc = deepcopy(self) + if prefix: + mc.prefix = self._check_arg(prefix, "prefix") + if postfix: + mc.postfix = self._check_arg(postfix, "postfix") + return mc + + def persistent(self, mode: bool = True) -> None: + """Method for post-init to change if metric states should be saved to its state_dict.""" + for _, m in self.items(keep_base=True, copy_state=False): + m.persistent(mode) + + def add_metrics( + self, 
metrics: Union[Metric, Sequence[Metric], Dict[str, Metric]], *additional_metrics: Metric + ) -> None: + """Add new metrics to Metric Collection.""" + if isinstance(metrics, Metric): + # set compatible with original type expectations + metrics = [metrics] + if isinstance(metrics, Sequence): + # prepare for optional additions + metrics = list(metrics) + remain: list = [] + for m in additional_metrics: + (metrics if isinstance(m, Metric) else remain).append(m) + + if remain: + rank_zero_warn( + f"You have passes extra arguments {remain} which are not `Metric` so they will be ignored." + ) + elif additional_metrics: + raise ValueError( + f"You have passes extra arguments {additional_metrics} which are not compatible" + f" with first passed dictionary {metrics} so they will be ignored." + ) + + if isinstance(metrics, dict): + # Check all values are metrics + # Make sure that metrics are added in deterministic order + for name in sorted(metrics.keys()): + metric = metrics[name] + if not isinstance(metric, (Metric, MetricCollection)): + raise ValueError( + f"Value {metric} belonging to key {name} is not an instance of" + " `torchmetrics.Metric` or `torchmetrics.MetricCollection`" + ) + if isinstance(metric, Metric): + self[name] = metric + else: + for k, v in metric.items(keep_base=False): + self[f"{name}_{k}"] = v + elif isinstance(metrics, Sequence): + for metric in metrics: + if not isinstance(metric, (Metric, MetricCollection)): + raise ValueError( + f"Input {metric} to `MetricCollection` is not a instance of" + " `torchmetrics.Metric` or `torchmetrics.MetricCollection`" + ) + if isinstance(metric, Metric): + name = metric.__class__.__name__ + if name in self: + raise ValueError(f"Encountered two metrics both named {name}") + self[name] = metric + else: + for k, v in metric.items(keep_base=False): + self[k] = v + else: + raise ValueError("Unknown input to MetricCollection.") + + self._groups_checked = False + if self._enable_compute_groups: + self._init_compute_groups() + else: + self._groups = {} + + def _init_compute_groups(self) -> None: + """Initialize compute groups. + + If user provided a list, we check that all metrics in the list are also in the collection. If set to `True` we + simply initialize each metric in the collection as its own group + """ + if isinstance(self._enable_compute_groups, list): + self._groups = {i: k for i, k in enumerate(self._enable_compute_groups)} + for v in self._groups.values(): + for metric in v: + if metric not in self: + raise ValueError( + f"Input {metric} in `compute_groups` argument does not match a metric in the collection." + f" Please make sure that {self._enable_compute_groups} matches {self.keys(keep_base=True)}" + ) + self._groups_checked = True + else: + # Initialize all metrics as their own compute group + self._groups = {i: [str(k)] for i, k in enumerate(self.keys(keep_base=True))} + + @property + def compute_groups(self) -> Dict[int, List[str]]: + """Return a dict with the current compute groups in the collection.""" + return self._groups + + def _set_name(self, base: str) -> str: + """Adjust name of metric with both prefix and postfix.""" + name = base if self.prefix is None else self.prefix + base + name = name if self.postfix is None else name + self.postfix + return name + + def _to_renamed_ordered_dict(self) -> OrderedDict: + od = OrderedDict() + for k, v in self._modules.items(): + od[self._set_name(k)] = v + return od + + def keys(self, keep_base: bool = False) -> Iterable[Hashable]: + r"""Return an iterable of the ModuleDict key. 
+ + Args: + keep_base: Whether to add prefix/postfix on the items collection. + """ + if keep_base: + return self._modules.keys() + return self._to_renamed_ordered_dict().keys() + + def items(self, keep_base: bool = False, copy_state: bool = True) -> Iterable[Tuple[str, Module]]: + r"""Return an iterable of the ModuleDict key/value pairs. + + Args: + keep_base: Whether to add prefix/postfix on the collection. + copy_state: + If metric states should be copied between metrics in the same compute group or just passed by reference + """ + self._compute_groups_create_state_ref(copy_state) + if keep_base: + return self._modules.items() + return self._to_renamed_ordered_dict().items() + + def values(self, copy_state: bool = True) -> Iterable[Module]: + """Return an iterable of the ModuleDict values. + + Args: + copy_state: + If metric states should be copied between metrics in the same compute group or just passed by reference + """ + self._compute_groups_create_state_ref(copy_state) + return self._modules.values() + + def __getitem__(self, key: str, copy_state: bool = True) -> Module: + """Retrieve a single metric from the collection. + + Args: + key: name of metric to retrieve + copy_state: + If metric states should be copied between metrics in the same compute group or just passed by reference + """ + self._compute_groups_create_state_ref(copy_state) + return self._modules[key] + + @staticmethod + def _check_arg(arg: Optional[str], name: str) -> Optional[str]: + if arg is None or isinstance(arg, str): + return arg + raise ValueError(f"Expected input `{name}` to be a string, but got {type(arg)}") + + def __repr__(self) -> str: + repr_str = super().__repr__()[:-2] + if self.prefix: + repr_str += f",\n prefix={self.prefix}{',' if self.postfix else ''}" + if self.postfix: + repr_str += f"{',' if not self.prefix else ''}\n postfix={self.postfix}" + return repr_str + "\n)" + + def set_dtype(self, dst_type: Union[str, torch.dtype]) -> "MetricCollection": + """Transfer all metric state to specific dtype. Special version of standard `type` method. + + Arguments: + dst_type (type or string): the desired type. + """ + for _, m in self.items(keep_base=True, copy_state=False): + m.set_dtype(dst_type) + return self diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__init__.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6aa93dd92610ae3a6107e27ac47c9a6c52f87a41 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__init__.py @@ -0,0 +1,125 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
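+
+# Editorial sketch (not part of the library): the functional metrics re-exported below
+# are importable from this subpackage; for example (assumed layout, values verified by hand):
+#
+#     import torch
+#     from torchmetrics.functional.classification import binary_accuracy
+#
+#     binary_accuracy(torch.tensor([0.2, 0.8]), torch.tensor([0, 1]))  # tensor(1.)
+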
+from torchmetrics.functional.classification.accuracy import ( # noqa: F401 + accuracy, + binary_accuracy, + multiclass_accuracy, + multilabel_accuracy, +) +from torchmetrics.functional.classification.auroc import ( # noqa: F401 + auroc, + binary_auroc, + multiclass_auroc, + multilabel_auroc, +) +from torchmetrics.functional.classification.average_precision import ( # noqa: F401 + average_precision, + binary_average_precision, + multiclass_average_precision, + multilabel_average_precision, +) +from torchmetrics.functional.classification.calibration_error import ( # noqa: F401 + binary_calibration_error, + calibration_error, + multiclass_calibration_error, +) +from torchmetrics.functional.classification.cohen_kappa import ( # noqa: F401 + binary_cohen_kappa, + cohen_kappa, + multiclass_cohen_kappa, +) +from torchmetrics.functional.classification.confusion_matrix import ( # noqa: F401 + binary_confusion_matrix, + confusion_matrix, + multiclass_confusion_matrix, + multilabel_confusion_matrix, +) +from torchmetrics.functional.classification.dice import dice # noqa: F401 +from torchmetrics.functional.classification.exact_match import ( # noqa: F401 + exact_match, + multiclass_exact_match, + multilabel_exact_match, +) +from torchmetrics.functional.classification.f_beta import ( # noqa: F401 + binary_f1_score, + binary_fbeta_score, + f1_score, + fbeta_score, + multiclass_f1_score, + multiclass_fbeta_score, + multilabel_f1_score, + multilabel_fbeta_score, +) +from torchmetrics.functional.classification.hamming import ( # noqa: F401 + binary_hamming_distance, + hamming_distance, + multiclass_hamming_distance, + multilabel_hamming_distance, +) +from torchmetrics.functional.classification.hinge import ( # noqa: F401 + binary_hinge_loss, + hinge_loss, + multiclass_hinge_loss, +) +from torchmetrics.functional.classification.jaccard import ( # noqa: F401 + binary_jaccard_index, + jaccard_index, + multiclass_jaccard_index, + multilabel_jaccard_index, +) +from torchmetrics.functional.classification.matthews_corrcoef import ( # noqa: F401 + binary_matthews_corrcoef, + matthews_corrcoef, + multiclass_matthews_corrcoef, + multilabel_matthews_corrcoef, +) +from torchmetrics.functional.classification.precision_recall import ( # noqa: F401 + binary_precision, + binary_recall, + multiclass_precision, + multiclass_recall, + multilabel_precision, + multilabel_recall, + precision, + recall, +) +from torchmetrics.functional.classification.precision_recall_curve import ( # noqa: F401 + binary_precision_recall_curve, + multiclass_precision_recall_curve, + multilabel_precision_recall_curve, + precision_recall_curve, +) +from torchmetrics.functional.classification.ranking import ( # noqa: F401 + multilabel_coverage_error, + multilabel_ranking_average_precision, + multilabel_ranking_loss, +) +from torchmetrics.functional.classification.recall_at_fixed_precision import ( # noqa: F401 + binary_recall_at_fixed_precision, + multiclass_recall_at_fixed_precision, + multilabel_recall_at_fixed_precision, +) +from torchmetrics.functional.classification.roc import binary_roc, multiclass_roc, multilabel_roc, roc # noqa: F401 +from torchmetrics.functional.classification.specificity import ( # noqa: F401 + binary_specificity, + multiclass_specificity, + multilabel_specificity, + specificity, +) +from torchmetrics.functional.classification.stat_scores import ( # noqa: F401 + binary_stat_scores, + multiclass_stat_scores, + multilabel_stat_scores, + stat_scores, +) diff --git 
a/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/accuracy.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/accuracy.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f43e6833001fdc1381178875ce6fdd33b4a80c7
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/accuracy.py
@@ -0,0 +1,428 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional
+
+import torch
+from torch import Tensor
+from typing_extensions import Literal
+
+from torchmetrics.functional.classification.stat_scores import (
+    _binary_stat_scores_arg_validation,
+    _binary_stat_scores_format,
+    _binary_stat_scores_tensor_validation,
+    _binary_stat_scores_update,
+    _multiclass_stat_scores_arg_validation,
+    _multiclass_stat_scores_format,
+    _multiclass_stat_scores_tensor_validation,
+    _multiclass_stat_scores_update,
+    _multilabel_stat_scores_arg_validation,
+    _multilabel_stat_scores_format,
+    _multilabel_stat_scores_tensor_validation,
+    _multilabel_stat_scores_update,
+)
+from torchmetrics.utilities.compute import _safe_divide
+
+
+def _accuracy_reduce(
+    tp: Tensor,
+    fp: Tensor,
+    tn: Tensor,
+    fn: Tensor,
+    average: Optional[Literal["binary", "micro", "macro", "weighted", "none"]],
+    multidim_average: Literal["global", "samplewise"] = "global",
+    multilabel: bool = False,
+) -> Tensor:
+    """Reduce classification statistics into an accuracy score.
+
+    Args:
+        tp: number of true positives
+        fp: number of false positives
+        tn: number of true negatives
+        fn: number of false negatives
+        average: reduction applied over the label dimension.
+
+            - ``"binary"`` treats the statistics as coming from a single binary task
+            - ``"micro"`` sums the statistics over labels before dividing
+            - ``"macro"``, ``"weighted"`` and ``"none"``/`None` compute a per-label score first
+
+        multidim_average: whether the reduction is applied globally or per sample
+        multilabel: bool indicating if reduction is for multilabel tasks
+
+    Returns:
+        Accuracy score
+    """
+    if average == "binary":
+        return _safe_divide(tp + tn, tp + tn + fp + fn)
+    elif average == "micro":
+        tp = tp.sum(dim=0 if multidim_average == "global" else 1)
+        fn = fn.sum(dim=0 if multidim_average == "global" else 1)
+        if multilabel:
+            fp = fp.sum(dim=0 if multidim_average == "global" else 1)
+            tn = tn.sum(dim=0 if multidim_average == "global" else 1)
+            return _safe_divide(tp + tn, tp + tn + fp + fn)
+        return _safe_divide(tp, tp + fn)
+    else:
+        if multilabel:
+            score = _safe_divide(tp + tn, tp + tn + fp + fn)
+        else:
+            score = _safe_divide(tp, tp + fn)
+        if average is None or average == "none":
+            return score
+        if average == "weighted":
+            weights = tp + fn
+        else:
+            weights = torch.ones_like(score)
+        return _safe_divide(weights * score, weights.sum(-1, keepdim=True)).sum(-1)
+
+
+def binary_accuracy(
+    preds: Tensor,
+    target: Tensor,
+    threshold: float = 0.5,
+    multidim_average: Literal["global", "samplewise"] = "global",
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tensor:
+    r"""Computes `Accuracy`_ for binary tasks:
+
+    .. math::
+        \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)
+
+    Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a
+    tensor of predictions.
+
+    Accepts the following input tensors:
+
+    - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside the
+      [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
+      we convert to an int tensor with thresholding using the value in ``threshold``.
+    - ``target`` (int tensor): ``(N, ...)``
+
+    Args:
+        preds: Tensor with predictions
+        target: Tensor with true labels
+        threshold: Threshold for transforming probability to binary {0,1} predictions
+        multidim_average:
+            Defines how additional dimensions ``...`` should be handled. Should be one of the following:
+
+            - ``global``: Additional dimensions are flattened along the batch dimension
+            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
+              The statistics in this case are calculated over the additional dimensions.
+
+        ignore_index:
+            Specifies a target value that is ignored and does not contribute to the metric calculation
+        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+            Set to ``False`` for faster computations.
+
+    Returns:
+        If ``multidim_average`` is set to ``global``, the metric returns a scalar value. If ``multidim_average``
+        is set to ``samplewise``, the metric returns an ``(N,)`` vector consisting of a scalar value per sample.
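+
+    The input-formatting contract above can be sketched by hand (illustrative; this mirrors the documented
+    behaviour, not the internal helpers)::
+
+        import torch
+
+        preds = torch.tensor([-1.2, 0.3, 2.5])  # values outside [0, 1] -> treated as logits
+        probs = preds.sigmoid()                 # tensor([0.2315, 0.5744, 0.9241])
+        hard = (probs > 0.5).int()              # threshold=0.5 -> tensor([0, 1, 1])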
+ + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import binary_accuracy + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> binary_accuracy(preds, target) + tensor(0.6667) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import binary_accuracy + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> binary_accuracy(preds, target) + tensor(0.6667) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import binary_accuracy + >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = torch.tensor( + ... [ + ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], + ... ] + ... ) + >>> binary_accuracy(preds, target, multidim_average='samplewise') + tensor([0.3333, 0.1667]) + """ + if validate_args: + _binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index) + _binary_stat_scores_tensor_validation(preds, target, multidim_average, ignore_index) + preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index) + tp, fp, tn, fn = _binary_stat_scores_update(preds, target, multidim_average) + return _accuracy_reduce(tp, fp, tn, fn, average="binary", multidim_average=multidim_average) + + +def multiclass_accuracy( + preds: Tensor, + target: Tensor, + num_classes: int, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + top_k: int = 1, + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes `Accuracy`_ for multiclass tasks: + + .. math:: + \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i) + + Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a + tensor of predictions. + + Accepts the following input tensors: + + - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point + we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into + an int tensor. + - ``target`` (int tensor): ``(N, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_classes: Integer specifing the number of classes + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + + top_k: + Number of highest probability or logit score predictions considered to find the correct label. + Only works when ``preds`` contain probabilities/logits. + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. 
+ + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + The returned shape depends on the ``average`` and ``multidim_average`` arguments: + + - If ``multidim_average`` is set to ``global``: + + - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor + - If ``average=None/'none'``, the shape will be ``(C,)`` + + - If ``multidim_average`` is set to ``samplewise``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)`` + - If ``average=None/'none'``, the shape will be ``(N, C)`` + + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import multiclass_accuracy + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([2, 1, 0, 1]) + >>> multiclass_accuracy(preds, target, num_classes=3) + tensor(0.8333) + >>> multiclass_accuracy(preds, target, num_classes=3, average=None) + tensor([0.5000, 1.0000, 1.0000]) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import multiclass_accuracy + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([ + ... [0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13], + ... ]) + >>> multiclass_accuracy(preds, target, num_classes=3) + tensor(0.8333) + >>> multiclass_accuracy(preds, target, num_classes=3, average=None) + tensor([0.5000, 1.0000, 1.0000]) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import multiclass_accuracy + >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> multiclass_accuracy(preds, target, num_classes=3, multidim_average='samplewise') + tensor([0.5000, 0.2778]) + >>> multiclass_accuracy(preds, target, num_classes=3, multidim_average='samplewise', average=None) + tensor([[1.0000, 0.0000, 0.5000], + [0.0000, 0.3333, 0.5000]]) + """ + if validate_args: + _multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index) + _multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index) + preds, target = _multiclass_stat_scores_format(preds, target, top_k) + tp, fp, tn, fn = _multiclass_stat_scores_update( + preds, target, num_classes, top_k, average, multidim_average, ignore_index + ) + return _accuracy_reduce(tp, fp, tn, fn, average=average, multidim_average=multidim_average) + + +def multilabel_accuracy( + preds: Tensor, + target: Tensor, + num_labels: int, + threshold: float = 0.5, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes `Accuracy`_ for multilabel tasks: + + .. math:: + \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i) + + Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a + tensor of predictions. + + Accepts the following input tensors: + + - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. 
Addtionally, + we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (int tensor): ``(N, C, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_labels: Integer specifing the number of labels + threshold: Threshold for transforming probability to binary (0,1) predictions + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + The returned shape depends on the ``average`` and ``multidim_average`` arguments: + + - If ``multidim_average`` is set to ``global``: + + - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor + - If ``average=None/'none'``, the shape will be ``(C,)`` + + - If ``multidim_average`` is set to ``samplewise``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)`` + - If ``average=None/'none'``, the shape will be ``(N, C)`` + + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import multilabel_accuracy + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> multilabel_accuracy(preds, target, num_labels=3) + tensor(0.6667) + >>> multilabel_accuracy(preds, target, num_labels=3, average=None) + tensor([1.0000, 0.5000, 0.5000]) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import multilabel_accuracy + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> multilabel_accuracy(preds, target, num_labels=3) + tensor(0.6667) + >>> multilabel_accuracy(preds, target, num_labels=3, average=None) + tensor([1.0000, 0.5000, 0.5000]) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import multilabel_accuracy + >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = torch.tensor( + ... [ + ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], + ... ] + ... 
) + >>> multilabel_accuracy(preds, target, num_labels=3, multidim_average='samplewise') + tensor([0.3333, 0.1667]) + >>> multilabel_accuracy(preds, target, num_labels=3, multidim_average='samplewise', average=None) + tensor([[0.5000, 0.5000, 0.0000], + [0.0000, 0.0000, 0.5000]]) + """ + if validate_args: + _multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index) + _multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index) + preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index) + tp, fp, tn, fn = _multilabel_stat_scores_update(preds, target, multidim_average) + return _accuracy_reduce(tp, fp, tn, fn, average=average, multidim_average=multidim_average, multilabel=True) + + +def accuracy( + preds: Tensor, + target: Tensor, + task: Literal["binary", "multiclass", "multilabel"], + threshold: float = 0.5, + num_classes: Optional[int] = None, + num_labels: Optional[int] = None, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro", + multidim_average: Optional[Literal["global", "samplewise"]] = "global", + top_k: Optional[int] = 1, + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes `Accuracy`_ + + .. math:: + \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i) + + Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions. + + This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the + ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of + :func:`binary_accuracy`, :func:`multiclass_accuracy` and :func:`multilabel_accuracy` for the specific details of + each argument influence and examples. + + Legacy Example: + >>> import torch + >>> target = torch.tensor([0, 1, 2, 3]) + >>> preds = torch.tensor([0, 2, 1, 3]) + >>> accuracy(preds, target, task="multiclass", num_classes=4) + tensor(0.5000) + + >>> target = torch.tensor([0, 1, 2]) + >>> preds = torch.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]]) + >>> accuracy(preds, target, task="multiclass", num_classes=3, top_k=2) + tensor(0.6667) + """ + assert multidim_average is not None + if task == "binary": + return binary_accuracy(preds, target, threshold, multidim_average, ignore_index, validate_args) + if task == "multiclass": + assert isinstance(num_classes, int) + assert isinstance(top_k, int) + return multiclass_accuracy( + preds, target, num_classes, average, top_k, multidim_average, ignore_index, validate_args + ) + if task == "multilabel": + assert isinstance(num_labels, int) + return multilabel_accuracy( + preds, target, num_labels, threshold, average, multidim_average, ignore_index, validate_args + ) + raise ValueError( + f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}" + ) diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/auroc.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/auroc.py new file mode 100644 index 0000000000000000000000000000000000000000..ddfccb392c458e894aa2c5f75e6e2aca1787482f --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/auroc.py @@ -0,0 +1,463 @@ +# Copyright The PyTorch Lightning team. 
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/auroc.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/auroc.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddfccb392c458e894aa2c5f75e6e2aca1787482f
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/auroc.py
@@ -0,0 +1,463 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import List, Optional, Tuple, Union
+
+import torch
+from torch import Tensor, tensor
+from typing_extensions import Literal
+
+from torchmetrics.functional.classification.precision_recall_curve import (
+    _binary_precision_recall_curve_arg_validation,
+    _binary_precision_recall_curve_format,
+    _binary_precision_recall_curve_tensor_validation,
+    _binary_precision_recall_curve_update,
+    _multiclass_precision_recall_curve_arg_validation,
+    _multiclass_precision_recall_curve_format,
+    _multiclass_precision_recall_curve_tensor_validation,
+    _multiclass_precision_recall_curve_update,
+    _multilabel_precision_recall_curve_arg_validation,
+    _multilabel_precision_recall_curve_format,
+    _multilabel_precision_recall_curve_tensor_validation,
+    _multilabel_precision_recall_curve_update,
+)
+from torchmetrics.functional.classification.roc import (
+    _binary_roc_compute,
+    _multiclass_roc_compute,
+    _multilabel_roc_compute,
+)
+from torchmetrics.utilities.compute import _auc_compute_without_check, _safe_divide
+from torchmetrics.utilities.data import _bincount
+from torchmetrics.utilities.prints import rank_zero_warn
+
+
+def _reduce_auroc(
+    fpr: Union[Tensor, List[Tensor]],
+    tpr: Union[Tensor, List[Tensor]],
+    average: Optional[Literal["macro", "weighted", "none"]] = "macro",
+    weights: Optional[Tensor] = None,
+) -> Tensor:
+    """Utility function for reducing multiple AUROC scores into one number."""
+    if isinstance(fpr, Tensor):
+        res = _auc_compute_without_check(fpr, tpr, 1.0, axis=1)
+    else:
+        res = [_auc_compute_without_check(x, y, 1.0) for x, y in zip(fpr, tpr)]
+        res = torch.stack(res)
+    if average is None or average == "none":
+        return res
+    if torch.isnan(res).any():
+        rank_zero_warn(
+            f"AUROC score for one or more classes was `nan`. Ignoring these classes in {average}-average",
+            UserWarning,
+        )
+    idx = ~torch.isnan(res)
+    if average == "macro":
+        return res[idx].mean()
+    elif average == "weighted" and weights is not None:
+        weights = _safe_divide(weights[idx], weights[idx].sum())
+        return (res[idx] * weights).sum()
+    else:
+        raise ValueError("Received an incompatible combination of inputs to make reduction.")
+
+
+def _binary_auroc_arg_validation(
+    max_fpr: Optional[float] = None,
+    thresholds: Optional[Union[int, List[float], Tensor]] = None,
+    ignore_index: Optional[int] = None,
+) -> None:
+    _binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
+    if max_fpr is not None and not (isinstance(max_fpr, float) and 0 < max_fpr <= 1):
+        raise ValueError(f"Argument `max_fpr` should be a float in range (0, 1], but got: {max_fpr}")
+
+
+def _binary_auroc_compute(
+    state: Union[Tensor, Tuple[Tensor, Tensor]],
+    thresholds: Optional[Tensor],
+    max_fpr: Optional[float] = None,
+    pos_label: int = 1,
+) -> Union[Tensor, Tuple[Tensor, Tensor, Tensor]]:
+    fpr, tpr, _ = _binary_roc_compute(state, thresholds, pos_label)
+    if max_fpr is None or max_fpr == 1:
+        return _auc_compute_without_check(fpr, tpr, 1.0)
+
+    _device = fpr.device if isinstance(fpr, Tensor) else fpr[0].device
+    max_area: Tensor = tensor(max_fpr, device=_device)
+    # Add a single point at max_fpr and interpolate its tpr value
+    stop = torch.bucketize(max_area, fpr, out_int32=True, right=True)
+    weight = (max_area - fpr[stop - 1]) / (fpr[stop] - fpr[stop - 1])
+    interp_tpr: Tensor = torch.lerp(tpr[stop - 1], tpr[stop], weight)
+    tpr = torch.cat([tpr[:stop], interp_tpr.view(1)])
+    fpr = torch.cat([fpr[:stop], max_area.view(1)])
+
+    # Compute partial AUC
+    partial_auc = _auc_compute_without_check(fpr, tpr, 1.0)
+
+    # McClish correction: standardize result to be 0.5 if non-discriminant and 1 if maximal
+    min_area: Tensor = 0.5 * max_area**2
+    return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))
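+
+# Editorial note on the McClish correction above: over a partial range [0, max_fpr], a
+# non-discriminant classifier (tpr == fpr) attains a partial AUC of 0.5 * max_fpr**2
+# (= min_area), while a perfect classifier (tpr == 1 everywhere) attains max_fpr (= max_area).
+# E.g. for max_fpr = 0.5: min_area = 0.125 and max_area = 0.5, so a raw partial AUC of 0.125
+# standardizes to 0.5 and a raw partial AUC of 0.5 standardizes to 1.0.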
+
+
+def binary_auroc(
+    preds: Tensor,
+    target: Tensor,
+    max_fpr: Optional[float] = None,
+    thresholds: Optional[Union[int, List[float], Tensor]] = None,
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tensor:
+    r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for binary tasks. The AUROC
+    score summarizes the ROC curve into a single number that describes the performance of a model for multiple
+    thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
+    corresponds to random guessing.
+
+    Accepts the following input tensors:
+
+    - ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
+      observation. If preds has values outside the [0,1] range we consider the input to be logits and will auto
+      apply sigmoid per element.
+    - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
+      only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class.
+
+    Additional dimension ``...`` will be flattened into the batch dimension.
+
+    The implementation supports both an exact, non-binned version of the metric and a binned version that is less
+    accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
+    non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
+    argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
+    size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
+
+    Args:
+        preds: Tensor with predictions
+        target: Tensor with true labels
+        max_fpr: If not ``None``, calculates standardized partial AUC over the range ``[0, max_fpr]``.
+        thresholds:
+            Can be one of:
+
+            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
+              all the data. Most accurate but also most memory consuming approach.
+            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
+              0 to 1 as bins for the calculation.
+            - If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
+            - If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
+              bins for the calculation.
+
+        ignore_index:
+            Specifies a target value that is ignored and does not contribute to the metric calculation
+        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+            Set to ``False`` for faster computations.
+
+    Returns:
+        A single scalar with the AUROC score
+
+    Example:
+        >>> from torchmetrics.functional.classification import binary_auroc
+        >>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
+        >>> target = torch.tensor([0, 1, 1, 0])
+        >>> binary_auroc(preds, target, thresholds=None)
+        tensor(0.5000)
+        >>> binary_auroc(preds, target, thresholds=5)
+        tensor(0.5000)
+    """
+    if validate_args:
+        _binary_auroc_arg_validation(max_fpr, thresholds, ignore_index)
+        _binary_precision_recall_curve_tensor_validation(preds, target, ignore_index)
+    preds, target, thresholds = _binary_precision_recall_curve_format(preds, target, thresholds, ignore_index)
+    state = _binary_precision_recall_curve_update(preds, target, thresholds)
+    return _binary_auroc_compute(state, thresholds, max_fpr)
+
+
+def _multiclass_auroc_arg_validation(
+    num_classes: int,
+    average: Optional[Literal["macro", "weighted", "none"]] = "macro",
+    thresholds: Optional[Union[int, List[float], Tensor]] = None,
+    ignore_index: Optional[int] = None,
+) -> None:
+    _multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index)
+    allowed_average = ("macro", "weighted", "none", None)
+    if average not in allowed_average:
+        raise ValueError(f"Expected argument `average` to be one of {allowed_average} but got {average}")
+
+
+def _multiclass_auroc_compute(
+    state: Union[Tensor, Tuple[Tensor, Tensor]],
+    num_classes: int,
+    average: Optional[Literal["macro", "weighted", "none"]] = "macro",
+    thresholds: Optional[Tensor] = None,
+) -> Tensor:
+    fpr, tpr, _ = _multiclass_roc_compute(state, num_classes, thresholds)
+    return _reduce_auroc(
+        fpr,
+        tpr,
+        average,
+        weights=_bincount(state[1], minlength=num_classes).float() if thresholds is None else state[0][:, 1, :].sum(-1),
+    )
+
+
+def multiclass_auroc(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: int,
+    average: Optional[Literal["macro", "weighted", "none"]] = "macro",
+    thresholds: Optional[Union[int, List[float], Tensor]] = None,
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tensor:
+    r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for multiclass tasks. The AUROC
+    score summarizes the ROC curve into a single number that describes the performance of a model for multiple
+    thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
+    corresponds to random guessing.
+
+    Accepts the following input tensors:
+
+    - ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
+      observation. If preds has values outside the [0,1] range we consider the input to be logits and will auto
+      apply softmax per sample.
+    - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
+      only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
+
+    Additional dimension ``...`` will be flattened into the batch dimension.
+
+    The implementation supports both an exact, non-binned version of the metric and a binned version that is less
+    accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
+    non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
+    argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
+    size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
+
+    Args:
+        preds: Tensor with predictions
+        target: Tensor with true labels
+        num_classes: Integer specifying the number of classes
+        average:
+            Defines the reduction that is applied over classes. Should be one of the following:
+
+            - ``macro``: Calculate score for each class and average them
+            - ``weighted``: Calculates score for each class and computes weighted average using their support
+            - ``"none"`` or ``None``: Calculates score for each class and applies no reduction
+        thresholds:
+            Can be one of:
+
+            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
+              all the data. Most accurate but also most memory consuming approach.
+            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
+              0 to 1 as bins for the calculation.
+            - If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
+            - If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
+              bins for the calculation.
+
+        ignore_index:
+            Specifies a target value that is ignored and does not contribute to the metric calculation
+        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+            Set to ``False`` for faster computations.
+
+    Returns:
+        If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will be returned with AUROC score per class.
+        If `average="macro"|"weighted"` then a single scalar is returned.
+
+    Example:
+        >>> from torchmetrics.functional.classification import multiclass_auroc
+        >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+        ...                       [0.05, 0.75, 0.05, 0.05, 0.05],
+        ...                       [0.05, 0.05, 0.75, 0.05, 0.05],
+        ...                       [0.05, 0.05, 0.05, 0.75, 0.05]])
+        >>> target = torch.tensor([0, 1, 3, 2])
+        >>> multiclass_auroc(preds, target, num_classes=5, average="macro", thresholds=None)
+        tensor(0.5333)
+        >>> multiclass_auroc(preds, target, num_classes=5, average=None, thresholds=None)
+        tensor([1.0000, 1.0000, 0.3333, 0.3333, 0.0000])
+        >>> multiclass_auroc(preds, target, num_classes=5, average="macro", thresholds=5)
+        tensor(0.5333)
+        >>> multiclass_auroc(preds, target, num_classes=5, average=None, thresholds=5)
+        tensor([1.0000, 1.0000, 0.3333, 0.3333, 0.0000])
+    """
+    if validate_args:
+        _multiclass_auroc_arg_validation(num_classes, average, thresholds, ignore_index)
+        _multiclass_precision_recall_curve_tensor_validation(preds, target, num_classes, ignore_index)
+    preds, target, thresholds = _multiclass_precision_recall_curve_format(
+        preds, target, num_classes, thresholds, ignore_index
+    )
+    state = _multiclass_precision_recall_curve_update(preds, target, num_classes, thresholds)
+    return _multiclass_auroc_compute(state, num_classes, average, thresholds)
+
+
+def _multilabel_auroc_arg_validation(
+    num_labels: int,
+    average: Optional[Literal["micro", "macro", "weighted", "none"]],
+    thresholds: Optional[Union[int, List[float], Tensor]] = None,
+    ignore_index: Optional[int] = None,
+) -> None:
+    _multilabel_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index)
+    allowed_average = ("micro", "macro", "weighted", "none", None)
+    if average not in allowed_average:
+        raise ValueError(f"Expected argument `average` to be one of {allowed_average} but got {average}")
+
+
+def _multilabel_auroc_compute(
+    state: Union[Tensor, Tuple[Tensor, Tensor]],
+    num_labels: int,
+    average: Optional[Literal["micro", "macro", "weighted", "none"]],
+    thresholds: Optional[Tensor],
+    ignore_index: Optional[int] = None,
+) -> Tensor:
+    if average == "micro":
+        if isinstance(state, Tensor) and thresholds is not None:
+            return _binary_auroc_compute(state.sum(1), thresholds, max_fpr=None)
+        else:
+            # In the non-binned case, micro-averaging flattens all label columns into one long binary problem.
+            preds = state[0].flatten()
+            target = state[1].flatten()
+            if ignore_index is not None:
+                idx = target == ignore_index
+                preds = preds[~idx]
+                target = target[~idx]
+            return _binary_auroc_compute((preds, target), thresholds, max_fpr=None)
+
+    else:
+        fpr, tpr, _ = _multilabel_roc_compute(state, num_labels, thresholds, ignore_index)
+        return _reduce_auroc(
+            fpr,
+            tpr,
+            average,
+            weights=(state[1] == 1).sum(dim=0).float() if thresholds is None else state[0][:, 1, :].sum(-1),
+        )
+
+
+def multilabel_auroc(
+    preds: Tensor,
+    target: Tensor,
+    num_labels: int,
+    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
+    thresholds: Optional[Union[int, List[float], Tensor]] = None,
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tensor:
+    r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for multilabel tasks. The AUROC
+    score summarizes the ROC curve into a single number that describes the performance of a model for multiple
+    thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
+    corresponds to random guessing.
+
+    Accepts the following input tensors:
+
+    - ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
+      observation. If preds has values outside the [0,1] range we consider the input to be logits and will auto
+      apply sigmoid per element.
+    - ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore
+      only contain {0,1} values (except if `ignore_index` is specified).
+
+    Additional dimension ``...`` will be flattened into the batch dimension.
+
+    The implementation supports both an exact, non-binned version of the metric and a binned version that is less
+    accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
+    non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
+    argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
+    size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).
+
+    Args:
+        preds: Tensor with predictions
+        target: Tensor with true labels
+        num_labels: Integer specifying the number of labels
+        average:
+            Defines the reduction that is applied over labels. Should be one of the following:
+
+            - ``micro``: Sum score over all labels
+            - ``macro``: Calculate score for each label and average them
+            - ``weighted``: Calculates score for each label and computes weighted average using their support
+            - ``"none"`` or ``None``: Calculates score for each label and applies no reduction
+        thresholds:
+            Can be one of:
+
+            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
+              all the data. Most accurate but also most memory consuming approach.
+            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
+              0 to 1 as bins for the calculation.
+            - If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
+            - If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
+              bins for the calculation.
+
+        ignore_index:
+            Specifies a target value that is ignored and does not contribute to the metric calculation
+        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+            Set to ``False`` for faster computations.
+
+    Returns:
+        If `average=None|"none"` then a 1d tensor of shape (n_labels, ) will be returned with AUROC score per label.
+        If `average="micro"|"macro"|"weighted"` then a single scalar is returned.
+
+    Example:
+        >>> from torchmetrics.functional.classification import multilabel_auroc
+        >>> preds = torch.tensor([[0.75, 0.05, 0.35],
+        ...                       [0.45, 0.75, 0.05],
+        ...                       [0.05, 0.55, 0.75],
+        ...                       [0.05, 0.65, 0.05]])
+        >>> target = torch.tensor([[1, 0, 1],
+        ...                        [0, 0, 0],
+        ...                        [0, 1, 1],
+        ...                        [1, 1, 1]])
+        >>> multilabel_auroc(preds, target, num_labels=3, average="macro", thresholds=None)
+        tensor(0.6528)
+        >>> multilabel_auroc(preds, target, num_labels=3, average=None, thresholds=None)
+        tensor([0.6250, 0.5000, 0.8333])
+        >>> multilabel_auroc(preds, target, num_labels=3, average="macro", thresholds=5)
+        tensor(0.6528)
+        >>> multilabel_auroc(preds, target, num_labels=3, average=None, thresholds=5)
+        tensor([0.6250, 0.5000, 0.8333])
+    """
+    if validate_args:
+        _multilabel_auroc_arg_validation(num_labels, average, thresholds, ignore_index)
+        _multilabel_precision_recall_curve_tensor_validation(preds, target, num_labels, ignore_index)
+    preds, target, thresholds = _multilabel_precision_recall_curve_format(
+        preds, target, num_labels, thresholds, ignore_index
+    )
+    state = _multilabel_precision_recall_curve_update(preds, target, num_labels, thresholds)
+    return _multilabel_auroc_compute(state, num_labels, average, thresholds, ignore_index)
+
+
+def auroc(
+    preds: Tensor,
+    target: Tensor,
+    task: Literal["binary", "multiclass", "multilabel"],
+    thresholds: Optional[Union[int, List[float], Tensor]] = None,
+    num_classes: Optional[int] = None,
+    num_labels: Optional[int] = None,
+    average: Optional[Literal["macro", "weighted", "none"]] = "macro",
+    max_fpr: Optional[float] = None,
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Union[Tensor, Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
+    r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_). The AUROC score summarizes the
+    ROC curve into a single number that describes the performance of a model for multiple thresholds at the same
+    time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5 corresponds to random guessing.
+
+    This function is a simple wrapper to get the task-specific versions of this metric, which is done by setting the
+    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
+    :func:`binary_auroc`, :func:`multiclass_auroc` and :func:`multilabel_auroc` for the specific details of
+    each argument's influence and examples.
+
+    Legacy Example:
+        >>> preds = torch.tensor([0.13, 0.26, 0.08, 0.19, 0.34])
+        >>> target = torch.tensor([0, 0, 1, 1, 1])
+        >>> auroc(preds, target, task='binary')
+        tensor(0.5000)
+
+        >>> preds = torch.tensor([[0.90, 0.05, 0.05],
+        ...                       [0.05, 0.90, 0.05],
+        ...                       [0.05, 0.05, 0.90],
+        ...                       [0.85, 0.05, 0.10],
+        ...                       [0.10, 0.10, 0.80]])
+        >>> target = torch.tensor([0, 1, 1, 2, 2])
+        >>> auroc(preds, target, task='multiclass', num_classes=3)
+        tensor(0.7778)
+    """
+    if task == "binary":
+        return binary_auroc(preds, target, max_fpr, thresholds, ignore_index, validate_args)
+    if task == "multiclass":
+        assert isinstance(num_classes, int)
+        return multiclass_auroc(preds, target, num_classes, average, thresholds, ignore_index, validate_args)
+    if task == "multilabel":
+        assert isinstance(num_labels, int)
+        return multilabel_auroc(preds, target, num_labels, average, thresholds, ignore_index, validate_args)
+    raise ValueError(
+        f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
+    )
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/calibration_error.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/calibration_error.py
new file mode 100644
index 0000000000000000000000000000000000000000..39c0ac16632615d8c6eecca4516754248b576091
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/calibration_error.py
@@ -0,0 +1,356 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional, Tuple, Union
+
+import torch
+from torch import Tensor
+from typing_extensions import Literal
+
+from torchmetrics.functional.classification.confusion_matrix import (
+    _binary_confusion_matrix_format,
+    _binary_confusion_matrix_tensor_validation,
+    _multiclass_confusion_matrix_format,
+    _multiclass_confusion_matrix_tensor_validation,
+)
+
+
+def _binning_bucketize(
+    confidences: Tensor, accuracies: Tensor, bin_boundaries: Tensor
+) -> Tuple[Tensor, Tensor, Tensor]:
+    """Compute calibration bins using ``torch.bucketize``. Use for pytorch >= 1.6.
+
+    Args:
+        confidences: The confidence (i.e. predicted prob) of the top-1 prediction.
+        accuracies: 1.0 if the top-1 prediction was correct, 0.0 otherwise.
+        bin_boundaries: Bin boundaries separating the ``linspace`` from 0 to 1.
+
+    Returns:
+        tuple with binned accuracy, binned confidence and binned probabilities
+    """
+    accuracies = accuracies.to(dtype=confidences.dtype)
+    acc_bin = torch.zeros(len(bin_boundaries) - 1, device=confidences.device, dtype=confidences.dtype)
+    conf_bin = torch.zeros(len(bin_boundaries) - 1, device=confidences.device, dtype=confidences.dtype)
+    count_bin = torch.zeros(len(bin_boundaries) - 1, device=confidences.device, dtype=confidences.dtype)
+
+    indices = torch.bucketize(confidences, bin_boundaries) - 1
+
+    count_bin.scatter_add_(dim=0, index=indices, src=torch.ones_like(confidences))
+
+    conf_bin.scatter_add_(dim=0, index=indices, src=confidences)
+    conf_bin = torch.nan_to_num(conf_bin / count_bin)
+
+    acc_bin.scatter_add_(dim=0, index=indices, src=accuracies)
+    acc_bin = torch.nan_to_num(acc_bin / count_bin)
+
+    prop_bin = count_bin / count_bin.sum()
+    return acc_bin, conf_bin, prop_bin
+
+
+def _ce_compute(
+    confidences: Tensor,
+    accuracies: Tensor,
+    bin_boundaries: Union[Tensor, int],
+    norm: str = "l1",
+    debias: bool = False,
+) -> Tensor:
+    """Computes the calibration error given the provided bin boundaries and norm.
+
+    Args:
+        confidences: The confidence (i.e. predicted prob) of the top-1 prediction.
+        accuracies: 1.0 if the top-1 prediction was correct, 0.0 otherwise.
+        bin_boundaries: Bin boundaries separating the ``linspace`` from 0 to 1.
+        norm: Norm function to use when computing calibration error. Defaults to "l1".
+        debias: Apply debiasing to the L2 norm computation as in
+            `Verified Uncertainty Calibration`_. Defaults to False.
+
+    Raises:
+        ValueError: If an unsupported norm function is provided.
+
+    Returns:
+        Tensor: Calibration error scalar.
+    """
+    if isinstance(bin_boundaries, int):
+        bin_boundaries = torch.linspace(0, 1, bin_boundaries + 1, dtype=torch.float, device=confidences.device)
+
+    if norm not in {"l1", "l2", "max"}:
+        raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max.")
+
+    with torch.no_grad():
+        acc_bin, conf_bin, prop_bin = _binning_bucketize(confidences, accuracies, bin_boundaries)
+
+    if norm == "l1":
+        ce = torch.sum(torch.abs(acc_bin - conf_bin) * prop_bin)
+    elif norm == "max":
+        ce = torch.max(torch.abs(acc_bin - conf_bin))
+    elif norm == "l2":
+        ce = torch.sum(torch.pow(acc_bin - conf_bin, 2) * prop_bin)
+        # NOTE: debiasing is disabled in the wrapper functions. This implementation differs from that in sklearn.
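+        # Editorial note: per bin, `debias_bins` below equals
+        # -prop_bin * acc_bin * (1 - acc_bin) / (count - 1), i.e. it subtracts an estimate of the
+        # variance of the binned accuracy (the debiased plug-in estimator of the squared
+        # calibration error in Kumar et al. 2019), which is why it is added before the square root.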
+        if debias:
+            # The sign convention (acc_bin - 1) vs (1 - acc_bin) is flipped relative to
+            # the equation in `Verified Uncertainty Calibration` (Kumar et al. 2019).
+            debias_bins = (acc_bin * (acc_bin - 1) * prop_bin) / (prop_bin * accuracies.size()[0] - 1)
+            ce += torch.sum(torch.nan_to_num(debias_bins))  # replace nans with zeros if nothing appeared in a bin
+        ce = torch.sqrt(ce) if ce > 0 else torch.tensor(0)
+    return ce
+
+
+def _binary_calibration_error_arg_validation(
+    n_bins: int,
+    norm: Literal["l1", "l2", "max"] = "l1",
+    ignore_index: Optional[int] = None,
+) -> None:
+    if not isinstance(n_bins, int) or n_bins < 1:
+        raise ValueError(f"Expected argument `n_bins` to be an integer larger than 0, but got {n_bins}")
+    allowed_norm = ("l1", "l2", "max")
+    if norm not in allowed_norm:
+        raise ValueError(f"Expected argument `norm` to be one of {allowed_norm}, but got {norm}.")
+    if ignore_index is not None and not isinstance(ignore_index, int):
+        raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
+
+
+def _binary_calibration_error_tensor_validation(
+    preds: Tensor, target: Tensor, ignore_index: Optional[int] = None
+) -> None:
+    _binary_confusion_matrix_tensor_validation(preds, target, ignore_index)
+    if not preds.is_floating_point():
+        raise ValueError(
+            "Expected argument `preds` to be floating tensor with probabilities/logits"
+            f" but got tensor with dtype {preds.dtype}"
+        )
+
+
+def _binary_calibration_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
+    confidences, accuracies = preds, target
+    return confidences, accuracies
+
+
+def binary_calibration_error(
+    preds: Tensor,
+    target: Tensor,
+    n_bins: int = 15,
+    norm: Literal["l1", "l2", "max"] = "l1",
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tensor:
+    r"""`Top-label Calibration Error`_ for binary tasks. The expected calibration error can be used to quantify how
+    well a given model is calibrated, e.g. how well the predicted output probabilities of the model match the
+    actual probabilities of the ground truth distribution.
+
+    Three different norms are implemented, each corresponding to variations on the calibration error metric.
+
+    .. math::
+        \text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)}
+
+    .. math::
+        \text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)}
+
+    .. math::
+        \text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)}
+
+    Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of
+    predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`. Bins are constructed
+    in a uniform way in the [0,1] range.
+
+    Accepts the following input tensors:
+
+    - ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
+      observation. If preds has values outside the [0,1] range we consider the input to be logits and will auto
+      apply sigmoid per element.
+    - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
+      only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class.
+
+    Additional dimension ``...`` will be flattened into the batch dimension.
+
+    Args:
+        preds: Tensor with predictions
+        target: Tensor with true labels
+        n_bins: Number of bins to use when computing the metric.
+        norm: Norm used to compare empirical and expected probability bins.
+        ignore_index:
+            Specifies a target value that is ignored and does not contribute to the metric calculation
+        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+            Set to ``False`` for faster computations.
+
+    Example:
+        >>> from torchmetrics.functional.classification import binary_calibration_error
+        >>> preds = torch.tensor([0.25, 0.25, 0.55, 0.75, 0.75])
+        >>> target = torch.tensor([0, 0, 1, 1, 1])
+        >>> binary_calibration_error(preds, target, n_bins=2, norm='l1')
+        tensor(0.2900)
+        >>> binary_calibration_error(preds, target, n_bins=2, norm='l2')
+        tensor(0.2918)
+        >>> binary_calibration_error(preds, target, n_bins=2, norm='max')
+        tensor(0.3167)
+    """
+    if validate_args:
+        _binary_calibration_error_arg_validation(n_bins, norm, ignore_index)
+        _binary_calibration_error_tensor_validation(preds, target, ignore_index)
+    preds, target = _binary_confusion_matrix_format(
+        preds, target, threshold=0.0, ignore_index=ignore_index, convert_to_labels=False
+    )
+    confidences, accuracies = _binary_calibration_error_update(preds, target)
+    return _ce_compute(confidences, accuracies, n_bins, norm)
+
+
+def _multiclass_calibration_error_arg_validation(
+    num_classes: int,
+    n_bins: int,
+    norm: Literal["l1", "l2", "max"] = "l1",
+    ignore_index: Optional[int] = None,
+) -> None:
+    if not isinstance(num_classes, int) or num_classes < 2:
+        raise ValueError(f"Expected argument `num_classes` to be an integer larger than 1, but got {num_classes}")
+    if not isinstance(n_bins, int) or n_bins < 1:
+        raise ValueError(f"Expected argument `n_bins` to be an integer larger than 0, but got {n_bins}")
+    allowed_norm = ("l1", "l2", "max")
+    if norm not in allowed_norm:
+        raise ValueError(f"Expected argument `norm` to be one of {allowed_norm}, but got {norm}.")
+    if ignore_index is not None and not isinstance(ignore_index, int):
+        raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
+
+
+def _multiclass_calibration_error_tensor_validation(
+    preds: Tensor, target: Tensor, num_classes: int, ignore_index: Optional[int] = None
+) -> None:
+    _multiclass_confusion_matrix_tensor_validation(preds, target, num_classes, ignore_index)
+    if not preds.is_floating_point():
+        raise ValueError(
+            "Expected argument `preds` to be floating tensor with probabilities/logits"
+            f" but got tensor with dtype {preds.dtype}"
+        )
+
+
+def _multiclass_calibration_error_update(
+    preds: Tensor,
+    target: Tensor,
+) -> Tuple[Tensor, Tensor]:
+    if not torch.all((0 <= preds) * (preds <= 1)):
+        preds = preds.softmax(1)
+    confidences, predictions = preds.max(dim=1)
+    accuracies = predictions.eq(target)
+    return confidences.float(), accuracies.float()
+
+
+def multiclass_calibration_error(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: int,
+    n_bins: int = 15,
+    norm: Literal["l1", "l2", "max"] = "l1",
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tensor:
+    r"""`Top-label Calibration Error`_ for multiclass tasks. The expected calibration error can be used to quantify
+    how well a given model is calibrated, e.g. how well the predicted output probabilities of the model match the
+    actual probabilities of the ground truth distribution.
+
+    Three different norms are implemented, each corresponding to variations on the calibration error metric.
+
+    .. math::
+        \text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)}
+
+    .. math::
+        \text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)}
+
+    .. math::
+        \text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)}
+
+    Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of
+    predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`. Bins are constructed
+    in a uniform way in the [0,1] range.
+
+    Accepts the following input tensors:
+
+    - ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
+      observation. If preds has values outside the [0,1] range we consider the input to be logits and will auto
+      apply softmax per sample.
+    - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
+      only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
+
+    Additional dimension ``...`` will be flattened into the batch dimension.
+
+    Args:
+        preds: Tensor with predictions
+        target: Tensor with true labels
+        num_classes: Integer specifying the number of classes
+        n_bins: Number of bins to use when computing the metric.
+        norm: Norm used to compare empirical and expected probability bins.
+        ignore_index:
+            Specifies a target value that is ignored and does not contribute to the metric calculation
+        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+            Set to ``False`` for faster computations.
+
+    Example:
+        >>> from torchmetrics.functional.classification import multiclass_calibration_error
+        >>> preds = torch.tensor([[0.25, 0.20, 0.55],
+        ...                       [0.55, 0.05, 0.40],
+        ...                       [0.10, 0.30, 0.60],
+        ...                       [0.90, 0.05, 0.05]])
+        >>> target = torch.tensor([0, 1, 2, 0])
+        >>> multiclass_calibration_error(preds, target, num_classes=3, n_bins=3, norm='l1')
+        tensor(0.2000)
+        >>> multiclass_calibration_error(preds, target, num_classes=3, n_bins=3, norm='l2')
+        tensor(0.2082)
+        >>> multiclass_calibration_error(preds, target, num_classes=3, n_bins=3, norm='max')
+        tensor(0.2333)
+    """
+    if validate_args:
+        _multiclass_calibration_error_arg_validation(num_classes, n_bins, norm, ignore_index)
+        _multiclass_calibration_error_tensor_validation(preds, target, num_classes, ignore_index)
+    preds, target = _multiclass_confusion_matrix_format(preds, target, ignore_index, convert_to_labels=False)
+    confidences, accuracies = _multiclass_calibration_error_update(preds, target)
+    return _ce_compute(confidences, accuracies, n_bins, norm)
+
+
+def calibration_error(
+    preds: Tensor,
+    target: Tensor,
+    task: Literal["binary", "multiclass"],
+    n_bins: int = 15,
+    norm: Literal["l1", "l2", "max"] = "l1",
+    num_classes: Optional[int] = None,
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tensor:
+    r"""`Top-label Calibration Error`_. The expected calibration error can be used to quantify how well a given
+    model is calibrated, e.g. how well the predicted output probabilities of the model match the actual
+    probabilities of the ground truth distribution.
+
+    Three different norms are implemented, each corresponding to variations on the calibration error metric.
+
+    .. math::
+        \text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)}
+
+    .. math::
+        \text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)}
+
+    .. math::
+        \text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)}
+
+    Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of
+    predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`. Bins are constructed
+    in a uniform way in the [0,1] range.
+
+    This function is a simple wrapper to get the task-specific versions of this metric, which is done by setting the
+    ``task`` argument to either ``'binary'`` or ``'multiclass'``. See the documentation of
+    :func:`binary_calibration_error` and :func:`multiclass_calibration_error` for the specific details of
+    each argument's influence and examples.
+    """
+    assert norm is not None
+    if task == "binary":
+        return binary_calibration_error(preds, target, n_bins, norm, ignore_index, validate_args)
+    if task == "multiclass":
+        assert isinstance(num_classes, int)
+        return multiclass_calibration_error(preds, target, num_classes, n_bins, norm, ignore_index, validate_args)
+    raise ValueError(f"Expected argument `task` to either be `'binary'` or `'multiclass'` but got {task}")
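+
+# Editorial sketch (not part of the upstream module): reproducing the `l1`/ECE doctest value
+# by hand with the two-bin layout used above, runnable only when this file is executed directly.
+if __name__ == "__main__":
+    import torch
+
+    preds = torch.tensor([0.25, 0.25, 0.55, 0.75, 0.75])
+    target = torch.tensor([0, 0, 1, 1, 1])
+    # With n_bins=2 the boundaries are [0.0, 0.5, 1.0]. The first bin holds {0.25, 0.25} with
+    # accuracy 0 and mean confidence 0.25; the second holds {0.55, 0.75, 0.75} with accuracy 1
+    # and mean confidence ~0.6833. ECE = 2/5 * |0 - 0.25| + 3/5 * |1 - 0.6833| = 0.29.
+    print(binary_calibration_error(preds, target, n_bins=2, norm="l1"))  # expected: tensor(0.2900)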
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/cohen_kappa.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/cohen_kappa.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4d9c1217a0ccff157bea007d370af399ca66997
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/cohen_kappa.py
@@ -0,0 +1,266 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional
+
+import torch
+from torch import Tensor
+from typing_extensions import Literal
+
+from torchmetrics.functional.classification.confusion_matrix import (
+    _binary_confusion_matrix_arg_validation,
+    _binary_confusion_matrix_format,
+    _binary_confusion_matrix_tensor_validation,
+    _binary_confusion_matrix_update,
+    _multiclass_confusion_matrix_arg_validation,
+    _multiclass_confusion_matrix_format,
+    _multiclass_confusion_matrix_tensor_validation,
+    _multiclass_confusion_matrix_update,
+)
+
+
+def _cohen_kappa_reduce(confmat: Tensor, weights: Optional[Literal["linear", "quadratic", "none"]] = None) -> Tensor:
+    """Reduce an un-normalized confusion matrix of shape (n_classes, n_classes) into the Cohen kappa score."""
+    confmat = confmat.float() if not confmat.is_floating_point() else confmat
+    n_classes = confmat.shape[0]
+    sum0 = confmat.sum(dim=0, keepdim=True)
+    sum1 = confmat.sum(dim=1, keepdim=True)
+    expected = sum1 @ sum0 / sum0.sum()  # outer product of the marginals, normalized by the total count
+
+    if weights is None or weights == "none":
+        w_mat = torch.ones_like(confmat).flatten()
+        w_mat[:: n_classes + 1] = 0  # zero out the diagonal: only disagreements are penalized
+        w_mat = w_mat.reshape(n_classes, n_classes)
+    elif weights in ("linear", "quadratic"):
+        w_mat = torch.zeros_like(confmat)
+        w_mat += torch.arange(n_classes, dtype=w_mat.dtype, device=w_mat.device)
+        if weights == "linear":
+            w_mat = torch.abs(w_mat - w_mat.T)
+        else:
+            w_mat = torch.pow(w_mat - w_mat.T, 2.0)
+    else:
+        raise ValueError(
+            f"Received {weights} for argument ``weights`` but should be either None, 'linear' or 'quadratic'"
+        )
+    k = torch.sum(w_mat * confmat) / torch.sum(w_mat * expected)
+    return 1 - k
+
+
+def _binary_cohen_kappa_arg_validation(
+    threshold: float = 0.5,
+    ignore_index: Optional[int] = None,
+    weights: Optional[Literal["linear", "quadratic", "none"]] = None,
+) -> None:
+    """Validate non tensor input.
+
+    - ``threshold`` has to be a float in the [0,1] range
+    - ``ignore_index`` has to be None or int
+    - ``weights`` has to be "linear" | "quadratic" | "none" | None
+    """
+    _binary_confusion_matrix_arg_validation(threshold, ignore_index, normalize=None)
+    allowed_weights = ("linear", "quadratic", "none", None)
+    if weights not in allowed_weights:
+        raise ValueError(f"Expected argument `weights` to be one of {allowed_weights}, but got {weights}.")
+
+
+def binary_cohen_kappa(
+    preds: Tensor,
+    target: Tensor,
+    threshold: float = 0.5,
+    weights: Optional[Literal["linear", "quadratic", "none"]] = None,
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tensor:
+    r"""Calculates `Cohen's kappa score`_ that measures inter-annotator agreement for binary tasks. It is defined as
+
+    .. math::
+        \kappa = (p_o - p_e) / (1 - p_e)
+
+    where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
+    the expected agreement when both annotators assign labels randomly. Note that
+    :math:`p_e` is estimated using a per-annotator empirical prior over the
+    class labels.
+
+    Accepts the following input tensors:
+
+    - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside the
+      [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
+      we convert to int tensor with thresholding using the value in ``threshold``.
+    - ``target`` (int tensor): ``(N, ...)``
+
+    Additional dimension ``...`` will be flattened into the batch dimension.
+
+    Args:
+        preds: Tensor with predictions
+        target: Tensor with true labels
+        threshold: Threshold for transforming probability to binary (0,1) predictions
+        weights: Weighting type to calculate the score. Choose from:
+
+            - ``None`` or ``'none'``: no weighting
+            - ``'linear'``: linear weighting
+            - ``'quadratic'``: quadratic weighting
+        ignore_index:
+            Specifies a target value that is ignored and does not contribute to the metric calculation
+        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+            Set to ``False`` for faster computations.
+
+    Example (preds is int tensor):
+        >>> from torchmetrics.functional.classification import binary_cohen_kappa
+        >>> target = torch.tensor([1, 1, 0, 0])
+        >>> preds = torch.tensor([0, 1, 0, 0])
+        >>> binary_cohen_kappa(preds, target)
+        tensor(0.5000)
+
+    Example (preds is float tensor):
+        >>> from torchmetrics.functional.classification import binary_cohen_kappa
+        >>> target = torch.tensor([1, 1, 0, 0])
+        >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01])
+        >>> binary_cohen_kappa(preds, target)
+        tensor(0.5000)
+    """
+    if validate_args:
+        _binary_cohen_kappa_arg_validation(threshold, ignore_index, weights)
+        _binary_confusion_matrix_tensor_validation(preds, target, ignore_index)
+    preds, target = _binary_confusion_matrix_format(preds, target, threshold, ignore_index)
+    confmat = _binary_confusion_matrix_update(preds, target)
+    return _cohen_kappa_reduce(confmat, weights)
+
+
+def _multiclass_cohen_kappa_arg_validation(
+    num_classes: int,
+    ignore_index: Optional[int] = None,
+    weights: Optional[Literal["linear", "quadratic", "none"]] = None,
+) -> None:
+    """Validate non tensor input.
+
+    - ``num_classes`` has to be an int larger than 1
+    - ``ignore_index`` has to be None or int
+    - ``weights`` has to be "linear" | "quadratic" | "none" | None
+    """
+    _multiclass_confusion_matrix_arg_validation(num_classes, ignore_index, normalize=None)
+    allowed_weights = ("linear", "quadratic", "none", None)
+    if weights not in allowed_weights:
+        raise ValueError(f"Expected argument `weights` to be one of {allowed_weights}, but got {weights}.")
+
+
+def multiclass_cohen_kappa(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: int,
+    weights: Optional[Literal["linear", "quadratic", "none"]] = None,
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tensor:
+    r"""Calculates `Cohen's kappa score`_ that measures inter-annotator agreement for multiclass tasks. It is
+    defined as
+
+    .. math::
+        \kappa = (p_o - p_e) / (1 - p_e)
+
+    where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
+    the expected agreement when both annotators assign labels randomly. Note that
+    :math:`p_e` is estimated using a per-annotator empirical prior over the
+    class labels.
+
+    Accepts the following input tensors:
+
+    - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point tensor
+      we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
+      an int tensor.
+    - ``target`` (int tensor): ``(N, ...)``
+
+    Additional dimension ``...`` will be flattened into the batch dimension.
+
+    Args:
+        preds: Tensor with predictions
+        target: Tensor with true labels
+        num_classes: Integer specifying the number of classes
+        weights: Weighting type to calculate the score. Choose from:
+
+            - ``None`` or ``'none'``: no weighting
+            - ``'linear'``: linear weighting
+            - ``'quadratic'``: quadratic weighting
+        ignore_index:
+            Specifies a target value that is ignored and does not contribute to the metric calculation
+        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+            Set to ``False`` for faster computations.
+
+    Example (pred is integer tensor):
+        >>> from torchmetrics.functional.classification import multiclass_cohen_kappa
+        >>> target = torch.tensor([2, 1, 0, 0])
+        >>> preds = torch.tensor([2, 1, 0, 1])
+        >>> multiclass_cohen_kappa(preds, target, num_classes=3)
+        tensor(0.6364)
+
+    Example (pred is float tensor):
+        >>> from torchmetrics.functional.classification import multiclass_cohen_kappa
+        >>> target = torch.tensor([2, 1, 0, 0])
+        >>> preds = torch.tensor([
+        ...     [0.16, 0.26, 0.58],
+        ...     [0.22, 0.61, 0.17],
+        ...     [0.71, 0.09, 0.20],
+        ...     [0.05, 0.82, 0.13],
+        ... ])
+        >>> multiclass_cohen_kappa(preds, target, num_classes=3)
+        tensor(0.6364)
+    """
+    if validate_args:
+        _multiclass_cohen_kappa_arg_validation(num_classes, ignore_index, weights)
+        _multiclass_confusion_matrix_tensor_validation(preds, target, num_classes, ignore_index)
+    preds, target = _multiclass_confusion_matrix_format(preds, target, ignore_index)
+    confmat = _multiclass_confusion_matrix_update(preds, target, num_classes)
+    return _cohen_kappa_reduce(confmat, weights)
+
+
+def cohen_kappa(
+    preds: Tensor,
+    target: Tensor,
+    task: Literal["binary", "multiclass"],
+    threshold: float = 0.5,
+    num_classes: Optional[int] = None,
+    weights: Optional[Literal["linear", "quadratic", "none"]] = None,
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tensor:
+    r"""Calculates `Cohen's kappa score`_ that measures inter-annotator agreement. It is defined as
+
+    .. math::
+        \kappa = (p_o - p_e) / (1 - p_e)
+
+    where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
+    the expected agreement when both annotators assign labels randomly. Note that
+    :math:`p_e` is estimated using a per-annotator empirical prior over the
+    class labels.
+
+    This function is a simple wrapper to get the task-specific versions of this metric, which is done by setting the
+    ``task`` argument to either ``'binary'`` or ``'multiclass'``. See the documentation of
+    :func:`binary_cohen_kappa` and :func:`multiclass_cohen_kappa` for the specific details of
+    each argument's influence and examples.
+
+    Legacy Example:
+        >>> target = torch.tensor([1, 1, 0, 0])
+        >>> preds = torch.tensor([0, 1, 0, 0])
+        >>> cohen_kappa(preds, target, task="multiclass", num_classes=2)
+        tensor(0.5000)
+    """
+    if task == "binary":
+        return binary_cohen_kappa(preds, target, threshold, weights, ignore_index, validate_args)
+    if task == "multiclass":
+        assert isinstance(num_classes, int)
+        return multiclass_cohen_kappa(preds, target, num_classes, weights, ignore_index, validate_args)
+    raise ValueError(f"Expected argument `task` to either be `'binary'` or `'multiclass'` but got {task}")
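+
+# Editorial sketch (not part of the upstream module): reproducing the unweighted kappa doctest
+# by hand, runnable only when this file is executed directly.
+if __name__ == "__main__":
+    import torch
+
+    target = torch.tensor([1, 1, 0, 0])
+    preds = torch.tensor([0, 1, 0, 0])
+    # Confusion matrix [[2, 0], [1, 1]]: observed agreement p_o = 3/4. The marginals are
+    # (2, 2) for target and (3, 1) for preds, so chance agreement p_e = (2*3 + 2*1) / 16 = 1/2.
+    # kappa = (0.75 - 0.5) / (1 - 0.5) = 0.5.
+    print(cohen_kappa(preds, target, task="binary"))  # expected: tensor(0.5000)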
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/confusion_matrix.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/confusion_matrix.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8710f698c368f204565f3e7cf3d79226bee62d7
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/confusion_matrix.py
@@ -0,0 +1,647 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional, Tuple
+
+import torch
+from torch import Tensor
+from typing_extensions import Literal
+
+from torchmetrics.utilities.checks import _check_same_shape
+from torchmetrics.utilities.data import _bincount
+from torchmetrics.utilities.prints import rank_zero_warn
+
+
+def _confusion_matrix_reduce(
+    confmat: Tensor, normalize: Optional[Literal["true", "pred", "all", "none"]] = None
+) -> Tensor:
+    """Reduce an un-normalized confusion matrix.
+
+    Args:
+        confmat: un-normalized confusion matrix
+        normalize: normalization method.
+
+            - `"true"` will divide by the sum of the column dimension.
+            - `"pred"` will divide by the sum of the row dimension.
+            - `"all"` will divide by the sum of the full matrix
+            - `"none"` or `None` will apply no reduction
+
+    Returns:
+        Normalized confusion matrix
+    """
+    allowed_normalize = ("true", "pred", "all", "none", None)
+    if normalize not in allowed_normalize:
+        raise ValueError(f"Argument `normalize` needs to be one of the following: {allowed_normalize}")
+    if normalize is not None and normalize != "none":
+        confmat = confmat.float() if not confmat.is_floating_point() else confmat
+        if normalize == "true":
+            confmat = confmat / confmat.sum(axis=-1, keepdim=True)
+        elif normalize == "pred":
+            confmat = confmat / confmat.sum(axis=-2, keepdim=True)
+        elif normalize == "all":
+            confmat = confmat / confmat.sum(axis=[-2, -1], keepdim=True)
+
+        nan_elements = confmat[torch.isnan(confmat)].nelement()
+        if nan_elements:
+            confmat[torch.isnan(confmat)] = 0
+            rank_zero_warn(f"{nan_elements} NaN values found in confusion matrix have been replaced with zeros.")
+    return confmat
+
+
+def _binary_confusion_matrix_arg_validation(
+    threshold: float = 0.5,
+    ignore_index: Optional[int] = None,
+    normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
+) -> None:
+    """Validate non tensor input.
+
+    - ``threshold`` has to be a float in the [0,1] range
+    - ``ignore_index`` has to be None or int
+    - ``normalize`` has to be "true" | "pred" | "all" | "none" | None
+    """
+    if not (isinstance(threshold, float) and (0 <= threshold <= 1)):
+        raise ValueError(f"Expected argument `threshold` to be a float in the [0,1] range, but got {threshold}.")
+    if ignore_index is not None and not isinstance(ignore_index, int):
+        raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
+    allowed_normalize = ("true", "pred", "all", "none", None)
+    if normalize not in allowed_normalize:
+        raise ValueError(f"Expected argument `normalize` to be one of {allowed_normalize}, but got {normalize}.")
+
+
+def _binary_confusion_matrix_tensor_validation(
+    preds: Tensor, target: Tensor, ignore_index: Optional[int] = None
+) -> None:
+    """Validate tensor input.
+
+    - tensors have to be of same shape
+    - all values in target tensor that are not ignored have to be in {0, 1}
+    - if pred tensor is not floating point, then all values also have to be in {0, 1}
+    """
+    # Check that they have same shape
+    _check_same_shape(preds, target)
+
+    # Check that target only contains {0,1} values or value in ignore_index
+    unique_values = torch.unique(target)
+    if ignore_index is None:
+        check = torch.any((unique_values != 0) & (unique_values != 1))
+    else:
+        check = torch.any((unique_values != 0) & (unique_values != 1) & (unique_values != ignore_index))
+    if check:
+        raise RuntimeError(
+            f"Detected the following values in `target`: {unique_values} but expected only"
+            f" the following values {[0, 1] + ([] if ignore_index is None else [ignore_index])}."
+        )
+
+    # If preds is label tensor, also check that it only contains {0,1} values
+    if not preds.is_floating_point():
+        unique_values = torch.unique(preds)
+        if torch.any((unique_values != 0) & (unique_values != 1)):
+            raise RuntimeError(
+                f"Detected the following values in `preds`: {unique_values} but expected only"
+                " the following values [0,1] since preds is a label tensor."
+            )
+
+
+def _binary_confusion_matrix_format(
+    preds: Tensor,
+    target: Tensor,
+    threshold: float = 0.5,
+    ignore_index: Optional[int] = None,
+    convert_to_labels: bool = True,
+) -> Tuple[Tensor, Tensor]:
+    """Convert all input to label format.
+
+    - Remove all datapoints that should be ignored
+    - If preds tensor is floating point, applies sigmoid if pred tensor not in [0,1] range
+    - If preds tensor is floating point, thresholds afterwards
+    """
+    preds = preds.flatten()
+    target = target.flatten()
+    if ignore_index is not None:
+        idx = target != ignore_index
+        preds = preds[idx]
+        target = target[idx]
+
+    if preds.is_floating_point():
+        if not torch.all((0 <= preds) * (preds <= 1)):
+            # preds is logits, convert with sigmoid
+            preds = preds.sigmoid()
+        if convert_to_labels:
+            preds = preds > threshold
+
+    return preds, target
+
+
+def _binary_confusion_matrix_update(preds: Tensor, target: Tensor) -> Tensor:
+    """Computes the bins to update the confusion matrix with."""
+    # Map each (target, pred) pair to a unique id: 0 = TN, 1 = FP, 2 = FN, 3 = TP, so that a
+    # bincount followed by a reshape yields the [[TN, FP], [FN, TP]] matrix.
+    unique_mapping = (target * 2 + preds).to(torch.long)
+    bins = _bincount(unique_mapping, minlength=4)
+    return bins.reshape(2, 2)
+
+
+def _binary_confusion_matrix_compute(
+    confmat: Tensor, normalize: Optional[Literal["true", "pred", "all", "none"]] = None
+) -> Tensor:
+    """Reduces the confusion matrix to its final form.
+
+    Normalization technique can be chosen by ``normalize``.
+    """
+    return _confusion_matrix_reduce(confmat, normalize)
+
+
+def binary_confusion_matrix(
+    preds: Tensor,
+    target: Tensor,
+    threshold: float = 0.5,
+    normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tensor:
+    r"""Computes the `confusion matrix`_ for binary tasks.
+
+    Accepts the following input tensors:
+
+    - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside the
+      [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
+      we convert to int tensor with thresholding using the value in ``threshold``.
+    - ``target`` (int tensor): ``(N, ...)``
+
+    Additional dimension ``...`` will be flattened into the batch dimension.
+
+    Args:
+        preds: Tensor with predictions
+        target: Tensor with true labels
+        threshold: Threshold for transforming probability to binary (0,1) predictions
+        normalize: Normalization mode for confusion matrix. Choose from:
+
+            - ``None`` or ``'none'``: no normalization (default)
+            - ``'true'``: normalization over the targets (most commonly used)
+            - ``'pred'``: normalization over the predictions
+            - ``'all'``: normalization over the whole matrix
+        ignore_index:
+            Specifies a target value that is ignored and does not contribute to the metric calculation
+        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+            Set to ``False`` for faster computations.
+
+    Returns:
+        A ``[2, 2]`` tensor
+
+    Example (preds is int tensor):
+        >>> from torchmetrics.functional.classification import binary_confusion_matrix
+        >>> target = torch.tensor([1, 1, 0, 0])
+        >>> preds = torch.tensor([0, 1, 0, 0])
+        >>> binary_confusion_matrix(preds, target)
+        tensor([[2, 0],
+                [1, 1]])
+
+    Example (preds is float tensor):
+        >>> from torchmetrics.functional.classification import binary_confusion_matrix
+        >>> target = torch.tensor([1, 1, 0, 0])
+        >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01])
+        >>> binary_confusion_matrix(preds, target)
+        tensor([[2, 0],
+                [1, 1]])
+    """
+    if validate_args:
+        _binary_confusion_matrix_arg_validation(threshold, ignore_index, normalize)
+        _binary_confusion_matrix_tensor_validation(preds, target, ignore_index)
+    preds, target = _binary_confusion_matrix_format(preds, target, threshold, ignore_index)
+    confmat = _binary_confusion_matrix_update(preds, target)
+    return _binary_confusion_matrix_compute(confmat, normalize)
+
+
+def _multiclass_confusion_matrix_arg_validation(
+    num_classes: int,
+    ignore_index: Optional[int] = None,
+    normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
+) -> None:
+    """Validate non tensor input.
+
+    - ``num_classes`` has to be an int larger than 1
+    - ``ignore_index`` has to be None or int
+    - ``normalize`` has to be "true" | "pred" | "all" | "none" | None
+    """
+    if not isinstance(num_classes, int) or num_classes < 2:
+        raise ValueError(f"Expected argument `num_classes` to be an integer larger than 1, but got {num_classes}")
+    if ignore_index is not None and not isinstance(ignore_index, int):
+        raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
+    allowed_normalize = ("true", "pred", "all", "none", None)
+    if normalize not in allowed_normalize:
+        raise ValueError(f"Expected argument `normalize` to be one of {allowed_normalize}, but got {normalize}.")
+
+
+def _multiclass_confusion_matrix_tensor_validation(
+    preds: Tensor, target: Tensor, num_classes: int, ignore_index: Optional[int] = None
+) -> None:
+    """Validate tensor input.
+
+    - if preds has one more dimension than target, then all dimensions except for preds.shape[1] should match
+      exactly. preds.shape[1] should have size equal to the number of classes
+    - if preds and target have the same number of dims, then all dimensions should match
+    - all values in the target tensor that are not ignored have to be in {0, ..., num_classes - 1}
+    - if the pred tensor is not floating point, then all values also have to be in {0, ..., num_classes - 1}
+    """
+    if preds.ndim == target.ndim + 1:
+        if not preds.is_floating_point():
+            raise ValueError("If `preds` have one dimension more than `target`, `preds` should be a float tensor.")
+        if preds.shape[1] != num_classes:
+            raise ValueError(
+                "If `preds` have one dimension more than `target`, `preds.shape[1]` should be"
+                " equal to number of classes."
+            )
+        if preds.shape[2:] != target.shape[1:]:
+            raise ValueError(
+                "If `preds` have one dimension more than `target`, the shape of `preds` should be"
+                " (N, C, ...), and the shape of `target` should be (N, ...)."
+            )
+    elif preds.ndim == target.ndim:
+        if preds.shape != target.shape:
+            raise ValueError(
+                "The `preds` and `target` should have the same shape, got `preds` with"
+                f" shape={preds.shape} and `target` with shape={target.shape}."
+            )
+    else:
+        raise ValueError(
+            "Either `preds` and `target` both should have the (same) shape (N, ...), or `target` should be (N, ...)"
+            " and `preds` should be (N, C, ...)."
+        )
+
+    num_unique_values = len(torch.unique(target))
+    if ignore_index is None:
+        check = num_unique_values > num_classes
+    else:
+        check = num_unique_values > num_classes + 1
+    if check:
+        raise RuntimeError(
+            "Detected more unique values in `target` than `num_classes`. Expected only "
+            f"{num_classes if ignore_index is None else num_classes + 1} but found "
+            f"{num_unique_values} in `target`."
+        )
+
+    if not preds.is_floating_point():
+        num_unique_values = len(torch.unique(preds))
+        if num_unique_values > num_classes:
+            raise RuntimeError(
+                "Detected more unique values in `preds` than `num_classes`. Expected only "
+                f"{num_classes} but found {num_unique_values} in `preds`."
+            )
+
+
+def _multiclass_confusion_matrix_format(
+    preds: Tensor,
+    target: Tensor,
+    ignore_index: Optional[int] = None,
+    convert_to_labels: bool = True,
+) -> Tuple[Tensor, Tensor]:
+    """Convert all input to label format.
+ + - Applies argmax if preds have one more dimension than target + - Remove all datapoints that should be ignored + """ + # Apply argmax if we have one more dimension + if preds.ndim == target.ndim + 1 and convert_to_labels: + preds = preds.argmax(dim=1) + + if convert_to_labels: + preds = preds.flatten() + else: + preds = torch.movedim(preds, 1, -1).reshape(-1, preds.shape[1]) + target = target.flatten() + + if ignore_index is not None: + idx = target != ignore_index + preds = preds[idx] + target = target[idx] + + return preds, target + + +def _multiclass_confusion_matrix_update(preds: Tensor, target: Tensor, num_classes: int) -> Tensor: + """Compute the bins to update the confusion matrix with.""" + unique_mapping = target.to(torch.long) * num_classes + preds.to(torch.long) + bins = _bincount(unique_mapping, minlength=num_classes**2) + return bins.reshape(num_classes, num_classes) + + +def _multiclass_confusion_matrix_compute( + confmat: Tensor, normalize: Optional[Literal["true", "pred", "all", "none"]] = None +) -> Tensor: + """Reduces the confusion matrix to it's final form. + + Normalization technique can be chosen by ``normalize``. + """ + return _confusion_matrix_reduce(confmat, normalize) + + +def multiclass_confusion_matrix( + preds: Tensor, + target: Tensor, + num_classes: int, + normalize: Optional[Literal["true", "pred", "all", "none"]] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes the `confusion matrix`_ for multiclass tasks. + + Accepts the following input tensors: + + - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point + we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into + an int tensor. + - ``target`` (int tensor): ``(N, ...)`` + + Additional dimension ``...`` will be flattened into the batch dimension. + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_classes: Integer specifing the number of classes + normalize: Normalization mode for confusion matrix. Choose from: + + - ``None`` or ``'none'``: no normalization (default) + - ``'true'``: normalization over the targets (most commonly used) + - ``'pred'``: normalization over the predictions + - ``'all'``: normalization over the whole matrix + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + A ``[num_classes, num_classes]`` tensor + + Example (pred is integer tensor): + >>> from torchmetrics.functional.classification import multiclass_confusion_matrix + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([2, 1, 0, 1]) + >>> multiclass_confusion_matrix(preds, target, num_classes=3) + tensor([[1, 1, 0], + [0, 1, 0], + [0, 0, 1]]) + + Example (pred is float tensor): + >>> from torchmetrics.functional.classification import multiclass_confusion_matrix + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([ + ... [0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13], + ... 
])
+        >>> multiclass_confusion_matrix(preds, target, num_classes=3)
+        tensor([[1, 1, 0],
+                [0, 1, 0],
+                [0, 0, 1]])
+    """
+    if validate_args:
+        _multiclass_confusion_matrix_arg_validation(num_classes, ignore_index, normalize)
+        _multiclass_confusion_matrix_tensor_validation(preds, target, num_classes, ignore_index)
+    preds, target = _multiclass_confusion_matrix_format(preds, target, ignore_index)
+    confmat = _multiclass_confusion_matrix_update(preds, target, num_classes)
+    return _multiclass_confusion_matrix_compute(confmat, normalize)
+
+
+def _multilabel_confusion_matrix_arg_validation(
+    num_labels: int,
+    threshold: float = 0.5,
+    ignore_index: Optional[int] = None,
+    normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
+) -> None:
+    """Validate non-tensor input.
+
+    - ``num_labels`` should be an int larger than 1
+    - ``threshold`` has to be a float in the [0,1] range
+    - ``ignore_index`` has to be None or int
+    - ``normalize`` has to be "true" | "pred" | "all" | "none" | None
+    """
+    if not isinstance(num_labels, int) or num_labels < 2:
+        raise ValueError(f"Expected argument `num_labels` to be an integer larger than 1, but got {num_labels}")
+    if not (isinstance(threshold, float) and (0 <= threshold <= 1)):
+        raise ValueError(f"Expected argument `threshold` to be a float in the [0,1] range, but got {threshold}.")
+    if ignore_index is not None and not isinstance(ignore_index, int):
+        raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
+    allowed_normalize = ("true", "pred", "all", "none", None)
+    if normalize not in allowed_normalize:
+        raise ValueError(f"Expected argument `normalize` to be one of {allowed_normalize}, but got {normalize}.")
+
+
+def _multilabel_confusion_matrix_tensor_validation(
+    preds: Tensor, target: Tensor, num_labels: int, ignore_index: Optional[int] = None
+) -> None:
+    """Validate tensor input.
+
+    - tensors have to be of the same shape
+    - the second dimension of both tensors needs to be equal to the number of labels
+    - all values in the target tensor that are not ignored have to be in {0, 1}
+    - if the pred tensor is not floating point, then all values also have to be in {0, 1}
+    """
+    # Check that they have the same shape
+    _check_same_shape(preds, target)
+
+    if preds.shape[1] != num_labels:
+        raise ValueError(
+            "Expected both `target.shape[1]` and `preds.shape[1]` to be equal to the number of labels"
+            f" (`num_labels={num_labels}`), but got {preds.shape[1]}."
+        )
+
+    # Check that target only contains [0,1] values or the value in ignore_index
+    unique_values = torch.unique(target)
+    if ignore_index is None:
+        check = torch.any((unique_values != 0) & (unique_values != 1))
+    else:
+        check = torch.any((unique_values != 0) & (unique_values != 1) & (unique_values != ignore_index))
+    if check:
+        raise RuntimeError(
+            f"Detected the following values in `target`: {unique_values} but expected only"
+            f" the following values {[0, 1] if ignore_index is None else [0, 1, ignore_index]}."
+        )
+
+    # If preds is a label tensor, also check that it only contains [0,1] values
+    if not preds.is_floating_point():
+        unique_values = torch.unique(preds)
+        if torch.any((unique_values != 0) & (unique_values != 1)):
+            raise RuntimeError(
+                f"Detected the following values in `preds`: {unique_values} but expected only"
+                " the following values [0,1] since preds is a label tensor."
+ ) + + +def _multilabel_confusion_matrix_format( + preds: Tensor, + target: Tensor, + num_labels: int, + threshold: float = 0.5, + ignore_index: Optional[int] = None, + should_threshold: bool = True, +) -> Tuple[Tensor, Tensor]: + """Convert all input to label format. + + - If preds tensor is floating point, applies sigmoid if pred tensor not in [0,1] range + - If preds tensor is floating point, thresholds afterwards + - Mask all elements that should be ignored with negative numbers for later filtration + """ + if preds.is_floating_point(): + if not torch.all((0 <= preds) * (preds <= 1)): + preds = preds.sigmoid() + if should_threshold: + preds = preds > threshold + preds = torch.movedim(preds, 1, -1).reshape(-1, num_labels) + target = torch.movedim(target, 1, -1).reshape(-1, num_labels) + + if ignore_index is not None: + preds = preds.clone() + target = target.clone() + # Make sure that when we map, it will always result in a negative number that we can filter away + # Each label correspond to a 2x2 matrix = 4 elements per label + idx = target == ignore_index + preds[idx] = -4 * num_labels + target[idx] = -4 * num_labels + + return preds, target + + +def _multilabel_confusion_matrix_update(preds: Tensor, target: Tensor, num_labels: int) -> Tensor: + """Computes the bins to update the confusion matrix with.""" + unique_mapping = ((2 * target + preds) + 4 * torch.arange(num_labels, device=preds.device)).flatten() + unique_mapping = unique_mapping[unique_mapping >= 0] + bins = _bincount(unique_mapping, minlength=4 * num_labels) + return bins.reshape(num_labels, 2, 2) + + +def _multilabel_confusion_matrix_compute( + confmat: Tensor, normalize: Optional[Literal["true", "pred", "all", "none"]] = None +) -> Tensor: + """Reduces the confusion matrix to it's final form. + + Normalization technique can be chosen by ``normalize``. + """ + return _confusion_matrix_reduce(confmat, normalize) + + +def multilabel_confusion_matrix( + preds: Tensor, + target: Tensor, + num_labels: int, + threshold: float = 0.5, + normalize: Optional[Literal["true", "pred", "all", "none"]] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes the `confusion matrix`_ for multilabel tasks. + + Accepts the following input tensors: + + - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (int tensor): ``(N, C, ...)`` + + Additional dimension ``...`` will be flattened into the batch dimension. + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_labels: Integer specifing the number of labels + threshold: Threshold for transforming probability to binary (0,1) predictions + normalize: Normalization mode for confusion matrix. Choose from: + + - ``None`` or ``'none'``: no normalization (default) + - ``'true'``: normalization over the targets (most commonly used) + - ``'pred'``: normalization over the predictions + - ``'all'``: normalization over the whole matrix + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. 
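+
+    .. note::
+        With ``ignore_index``, the ignored positions are masked out per label before the
+        ``[num_labels, 2, 2]`` update is computed, so they contribute to none of the per-label matrices.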
+
+    Returns:
+        A ``[num_labels, 2, 2]`` tensor
+
+    Example (preds is int tensor):
+        >>> from torchmetrics.functional.classification import multilabel_confusion_matrix
+        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
+        >>> multilabel_confusion_matrix(preds, target, num_labels=3)
+        tensor([[[1, 0], [0, 1]],
+                [[1, 0], [1, 0]],
+                [[0, 1], [0, 1]]])
+
+    Example (preds is float tensor):
+        >>> from torchmetrics.functional.classification import multilabel_confusion_matrix
+        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
+        >>> multilabel_confusion_matrix(preds, target, num_labels=3)
+        tensor([[[1, 0], [0, 1]],
+                [[1, 0], [1, 0]],
+                [[0, 1], [0, 1]]])
+    """
+    if validate_args:
+        _multilabel_confusion_matrix_arg_validation(num_labels, threshold, ignore_index, normalize)
+        _multilabel_confusion_matrix_tensor_validation(preds, target, num_labels, ignore_index)
+    preds, target = _multilabel_confusion_matrix_format(preds, target, num_labels, threshold, ignore_index)
+    confmat = _multilabel_confusion_matrix_update(preds, target, num_labels)
+    return _multilabel_confusion_matrix_compute(confmat, normalize)
+
+
+def confusion_matrix(
+    preds: Tensor,
+    target: Tensor,
+    task: Literal["binary", "multiclass", "multilabel"],
+    threshold: float = 0.5,
+    num_classes: Optional[int] = None,
+    num_labels: Optional[int] = None,
+    normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tensor:
+    r"""Computes the `confusion matrix`_.
+
+    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting
+    the ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
+    :func:`binary_confusion_matrix`, :func:`multiclass_confusion_matrix` and :func:`multilabel_confusion_matrix`
+    for the specific details of how each argument influences the computation, together with examples.
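+
+    Example (functional dispatch; a minimal sketch mirroring :func:`binary_confusion_matrix` above):
+        >>> from torchmetrics.functional.classification import confusion_matrix
+        >>> target = torch.tensor([1, 1, 0, 0])
+        >>> preds = torch.tensor([0, 1, 0, 0])
+        >>> confusion_matrix(preds, target, task="binary")
+        tensor([[2, 0],
+                [1, 1]])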
+ + Legacy Example: + >>> from torchmetrics import ConfusionMatrix + >>> target = torch.tensor([1, 1, 0, 0]) + >>> preds = torch.tensor([0, 1, 0, 0]) + >>> confmat = ConfusionMatrix(task="binary") + >>> confmat(preds, target) + tensor([[2, 0], + [1, 1]]) + + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([2, 1, 0, 1]) + >>> confmat = ConfusionMatrix(task="multiclass", num_classes=3) + >>> confmat(preds, target) + tensor([[1, 1, 0], + [0, 1, 0], + [0, 0, 1]]) + + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> confmat = ConfusionMatrix(task="multilabel", num_labels=3) + >>> confmat(preds, target) + tensor([[[1, 0], [0, 1]], + [[1, 0], [1, 0]], + [[0, 1], [0, 1]]]) + """ + if task == "binary": + return binary_confusion_matrix(preds, target, threshold, normalize, ignore_index, validate_args) + if task == "multiclass": + assert isinstance(num_classes, int) + return multiclass_confusion_matrix(preds, target, num_classes, normalize, ignore_index, validate_args) + if task == "multilabel": + assert isinstance(num_labels, int) + return multilabel_confusion_matrix(preds, target, num_labels, threshold, normalize, ignore_index, validate_args) + raise ValueError( + f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}" + ) diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/dice.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/dice.py new file mode 100644 index 0000000000000000000000000000000000000000..301321bfaeba2cf08de8e85bf9e7d3f6d7858858 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/dice.py @@ -0,0 +1,207 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional + +import torch +from torch import Tensor + +from torchmetrics.functional.classification.stat_scores import _reduce_stat_scores, _stat_scores_update +from torchmetrics.utilities.checks import _input_squeeze +from torchmetrics.utilities.enums import AverageMethod, MDMCAverageMethod + + +def _dice_compute( + tp: Tensor, + fp: Tensor, + fn: Tensor, + average: Optional[str], + mdmc_average: Optional[str], + zero_division: int = 0, +) -> Tensor: + """Computes dice from the stat scores: true positives, false positives, false negatives. 
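+
+    That is, each score is computed as ``2 * tp / (2 * tp + fp + fn)`` elementwise before the reduction
+    selected by ``average`` and ``mdmc_average`` is applied.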
+
+    Args:
+        tp: True positives
+        fp: False positives
+        fn: False negatives
+        average: Defines the reduction that is applied
+        mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter)
+        zero_division: The value to use for the score if the denominator equals zero
+    """
+    numerator = 2 * tp
+    denominator = 2 * tp + fp + fn
+
+    if average == AverageMethod.MACRO and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
+        cond = tp + fp + fn == 0
+        numerator = numerator[~cond]
+        denominator = denominator[~cond]
+
+    if average == AverageMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
+        # a class is not present if there exists no TPs, no FPs, and no FNs
+        meaningless_indices = torch.nonzero((tp | fn | fp) == 0).cpu()
+        numerator[meaningless_indices, ...] = -1
+        denominator[meaningless_indices, ...] = -1
+
+    return _reduce_stat_scores(
+        numerator=numerator,
+        denominator=denominator,
+        weights=None if average != "weighted" else tp + fn,
+        average=average,
+        mdmc_average=mdmc_average,
+        zero_division=zero_division,
+    )
+
+
+def dice(
+    preds: Tensor,
+    target: Tensor,
+    zero_division: int = 0,
+    average: Optional[str] = "micro",
+    mdmc_average: Optional[str] = "global",
+    threshold: float = 0.5,
+    top_k: Optional[int] = None,
+    num_classes: Optional[int] = None,
+    multiclass: Optional[bool] = None,
+    ignore_index: Optional[int] = None,
+) -> Tensor:
+    r"""Computes `Dice`_:
+
+    .. math:: \text{Dice} = \frac{\text{2 * TP}}{\text{2 * TP} + \text{FP} + \text{FN}}
+
+    Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
+    false negatives respectively.
+
+    It is recommended to set ``ignore_index`` to the index of the background class.
+
+    The reduction method (how the dice scores are aggregated) is controlled by the
+    ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
+    multi-dimensional multi-class case.
+
+    Args:
+        preds: Predictions from model (probabilities, logits or labels)
+        target: Ground truth values
+        zero_division: The value to use for the score if denominator equals zero
+        average:
+            Defines the reduction that is applied. Should be one of the following:
+
+            - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
+            - ``'macro'``: Calculate the metric for each class separately, and average the
+              metrics across classes (with equal weights for each class).
+            - ``'weighted'``: Calculate the metric for each class separately, and average the
+              metrics across classes, weighting each class by its support (``tp + fn``).
+            - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
+              the metric for every class.
+            - ``'samples'``: Calculate the metric for each sample, and average the metrics
+              across samples (with equal weights for each sample).
+
+            .. note:: What is considered a sample in the multi-dimensional multi-class case
+                depends on the value of ``mdmc_average``.
+
+            .. note:: If ``'none'`` and a given class doesn't occur in the ``preds`` or ``target``,
+                the value for the class will be ``nan``.
+
+        mdmc_average:
+            Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
+            ``average`` parameter). Should be one of the following:
+
+            - ``None``: Should be left unchanged if your data is not multi-dimensional
+              multi-class.
+
+            - ``'samplewise'``: In this case, the statistics are computed separately for each
+              sample on the ``N`` axis, and then averaged over samples.
+              The computation for each sample is done by treating the flattened extra axes ``...``
+              as the ``N`` dimension within the sample,
+              and computing the metric for the sample based on that.
+
+            - ``'global'`` [default]: In this case the ``N`` and ``...`` dimensions of the inputs
+              are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
+              were ``(N_X, C)``. From here on the ``average`` parameter applies as usual.
+
+        ignore_index:
+            Integer specifying a target class to ignore. If given, this class index does not contribute
+            to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
+            or ``'none'``, the score for the ignored class will be returned as ``nan``.
+
+        num_classes:
+            Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
+
+        threshold:
+            Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
+            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
+        top_k:
+            Number of the highest probability or logit score predictions considered for finding the correct label,
+            relevant only for (multi-dimensional) multi-class inputs. The
+            default value (``None``) will be interpreted as 1 for these inputs.
+
+            Should be left at default (``None``) for all other types of inputs.
+        multiclass:
+            Used only in certain special cases, where you want to treat inputs as a different type
+            than what they appear to be.
+
+    Return:
+        The shape of the returned tensor depends on the ``average`` parameter:
+
+        - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned
+        - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number of classes
+
+    Raises:
+        ValueError:
+            If ``average`` is not one of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``
+            or ``None``.
+        ValueError:
+            If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``.
+        ValueError:
+            If ``average`` is set but ``num_classes`` is not provided.
+        ValueError:
+            If ``num_classes`` is set and ``ignore_index`` is not in the range ``[0, num_classes)``.
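+
+    Example (per-class scores; an illustrative sketch using the same inputs as the micro example below,
+    where ``average=None`` requires ``num_classes``):
+        >>> from torchmetrics.functional import dice
+        >>> preds = torch.tensor([2, 0, 2, 1])
+        >>> target = torch.tensor([1, 1, 2, 0])
+        >>> dice(preds, target, average=None, num_classes=3)
+        tensor([0.0000, 0.0000, 0.6667])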
+
+    Example:
+        >>> from torchmetrics.functional import dice
+        >>> preds = torch.tensor([2, 0, 2, 1])
+        >>> target = torch.tensor([1, 1, 2, 0])
+        >>> dice(preds, target, average='micro')
+        tensor(0.2500)
+    """
+    allowed_average = ("micro", "macro", "weighted", "samples", "none", None)
+    if average not in allowed_average:
+        raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
+
+    if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1):
+        raise ValueError(f"When `average` is set to {average}, the number of classes must be provided.")
+
+    allowed_mdmc_average = [None, "samplewise", "global"]
+    if mdmc_average not in allowed_mdmc_average:
+        raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.")
+
+    if num_classes and ignore_index is not None and (not ignore_index < num_classes or num_classes == 1):
+        raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes.")
+
+    if top_k is not None and (not isinstance(top_k, int) or top_k <= 0):
+        raise ValueError(f"The `top_k` should be an integer larger than 0, got {top_k}.")
+
+    preds, target = _input_squeeze(preds, target)
+    # `weighted`/`none` reductions are applied after the fact, so per-class statistics are gathered here
+    reduce = "macro" if average in ("weighted", "none", None) else average
+
+    tp, fp, _, fn = _stat_scores_update(
+        preds,
+        target,
+        reduce=reduce,
+        mdmc_reduce=mdmc_average,
+        threshold=threshold,
+        num_classes=num_classes,
+        top_k=top_k,
+        multiclass=multiclass,
+        ignore_index=ignore_index,
+    )
+
+    return _dice_compute(tp, fp, fn, average, mdmc_average, zero_division)
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/exact_match.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/exact_match.py
new file mode 100644
index 0000000000000000000000000000000000000000..14fe8f7ce2bcb1305bdb0585f264852fb539f1ea
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/exact_match.py
@@ -0,0 +1,241 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional, Tuple + +import torch +from torch import Tensor +from typing_extensions import Literal + +from torchmetrics.functional.classification.stat_scores import ( + _multiclass_stat_scores_arg_validation, + _multiclass_stat_scores_format, + _multiclass_stat_scores_tensor_validation, + _multilabel_stat_scores_arg_validation, + _multilabel_stat_scores_format, + _multilabel_stat_scores_tensor_validation, +) +from torchmetrics.utilities.compute import _safe_divide + + +def _exact_match_reduce( + correct: Tensor, + total: Tensor, +) -> Tensor: + """Final reduction for exact match.""" + return _safe_divide(correct, total) + + +def _multiclass_exact_match_update( + preds: Tensor, + target: Tensor, + multidim_average: Literal["global", "samplewise"] = "global", +) -> Tuple[Tensor, Tensor]: + """Computes the statistics.""" + correct = (preds == target).sum(1) == preds.shape[1] + correct = correct if multidim_average == "samplewise" else correct.sum() + total = torch.tensor(preds.shape[0] if multidim_average == "global" else 1, device=correct.device) + return correct, total + + +def multiclass_exact_match( + preds: Tensor, + target: Tensor, + num_classes: int, + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes Exact match (also known as subset accuracy) for multiclass tasks. Exact Match is a stricter version + of accuracy where all labels have to match exactly for the sample to be correctly classified. + + Accepts the following input tensors: + + - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point + we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into + an int tensor. + - ``target`` (int tensor): ``(N, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_classes: Integer specifing the number of labels + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. 
+ + Returns: + The returned shape depends on the ``multidim_average`` argument: + + - If ``multidim_average`` is set to ``global`` the output will be a scalar tensor + - If ``multidim_average`` is set to ``samplewise`` the output will be a tensor of shape ``(N,)`` + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import multiclass_exact_match + >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]]) + >>> multiclass_exact_match(preds, target, num_classes=3, multidim_average='global') + tensor(0.5000) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import multiclass_exact_match + >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]]) + >>> multiclass_exact_match(preds, target, num_classes=3, multidim_average='samplewise') + tensor([1., 0.]) + """ + top_k, average = 1, None + if validate_args: + _multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index) + _multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index) + preds, target = _multiclass_stat_scores_format(preds, target, top_k) + correct, total = _multiclass_exact_match_update(preds, target, multidim_average) + return _exact_match_reduce(correct, total) + + +def _multilabel_exact_match_update( + preds: Tensor, target: Tensor, num_labels: int, multidim_average: Literal["global", "samplewise"] = "global" +) -> Tuple[Tensor, Tensor]: + """Computes the statistics.""" + if multidim_average == "global": + preds = torch.movedim(preds, 1, -1).reshape(-1, num_labels) + target = torch.movedim(target, 1, -1).reshape(-1, num_labels) + + correct = ((preds == target).sum(1) == num_labels).sum(dim=-1) + total = torch.tensor(preds.shape[0 if multidim_average == "global" else 2], device=correct.device) + return correct, total + + +def multilabel_exact_match( + preds: Tensor, + target: Tensor, + num_labels: int, + threshold: float = 0.5, + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes Exact match (also known as subset accuracy) for multilabel tasks. Exact Match is a stricter version + of accuracy where all labels have to match exactly for the sample to be correctly classified. + + Accepts the following input tensors: + + - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (int tensor): ``(N, C, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_labels: Integer specifing the number of labels + threshold: Threshold for transforming probability to binary (0,1) predictions + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. 
+ + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + The returned shape depends on the ``multidim_average`` argument: + + - If ``multidim_average`` is set to ``global`` the output will be a scalar tensor + - If ``multidim_average`` is set to ``samplewise`` the output will be a tensor of shape ``(N,)`` + + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import multilabel_exact_match + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> multilabel_exact_match(preds, target, num_labels=3) + tensor(0.5000) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import multilabel_exact_match + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> multilabel_exact_match(preds, target, num_labels=3) + tensor(0.5000) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import multilabel_exact_match + >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = torch.tensor( + ... [ + ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], + ... ] + ... ) + >>> multilabel_exact_match(preds, target, num_labels=3, multidim_average='samplewise') + tensor([0., 0.]) + """ + average = None + if validate_args: + _multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index) + _multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index) + preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index) + correct, total = _multilabel_exact_match_update(preds, target, num_labels, multidim_average) + return _exact_match_reduce(correct, total) + + +def exact_match( + preds: Tensor, + target: Tensor, + task: Literal["multiclass", "multilabel"], + num_classes: Optional[int] = None, + num_labels: Optional[int] = None, + threshold: float = 0.5, + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes Exact match (also known as subset accuracy). Exact Match is a stricter version of accuracy where + all classes/labels have to match exactly for the sample to be correctly classified. + + This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the + ``task`` argument to either ``'multiclass'`` or ``'multilabel'``. See the documentation of + :func:`multiclass_exact_match` and :func:`multilabel_exact_match` for the specific details of + each argument influence and examples. 
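+
+    Example (functional dispatch; a minimal sketch mirroring :func:`multilabel_exact_match` above):
+        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
+        >>> exact_match(preds, target, task="multilabel", num_labels=3)
+        tensor(0.5000)
+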
+ Legacy Example: + >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]]) + >>> exact_match(preds, target, task="multiclass", num_classes=3, multidim_average='global') + tensor(0.5000) + + >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]]) + >>> exact_match(preds, target, task="multiclass", num_classes=3, multidim_average='samplewise') + tensor([1., 0.]) + """ + if task == "multiclass": + assert num_classes is not None + return multiclass_exact_match(preds, target, num_classes, multidim_average, ignore_index, validate_args) + if task == "multilabel": + assert num_labels is not None + return multilabel_exact_match( + preds, target, num_labels, threshold, multidim_average, ignore_index, validate_args + ) + raise ValueError(f"Expected argument `task` to either be `'multiclass'` or `'multilabel'` but got {task}") diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/f_beta.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/f_beta.py new file mode 100644 index 0000000000000000000000000000000000000000..edb74966f53e1be469549a1d9b9383b9a9ab28a1 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/f_beta.py @@ -0,0 +1,775 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import Optional + +import torch +from torch import Tensor +from typing_extensions import Literal + +from torchmetrics.functional.classification.stat_scores import ( + _binary_stat_scores_arg_validation, + _binary_stat_scores_format, + _binary_stat_scores_tensor_validation, + _binary_stat_scores_update, + _multiclass_stat_scores_arg_validation, + _multiclass_stat_scores_format, + _multiclass_stat_scores_tensor_validation, + _multiclass_stat_scores_update, + _multilabel_stat_scores_arg_validation, + _multilabel_stat_scores_format, + _multilabel_stat_scores_tensor_validation, + _multilabel_stat_scores_update, +) +from torchmetrics.utilities.compute import _safe_divide + + +def _fbeta_reduce( + tp: Tensor, + fp: Tensor, + tn: Tensor, + fn: Tensor, + beta: float, + average: Optional[Literal["binary", "micro", "macro", "weighted", "none"]], + multidim_average: Literal["global", "samplewise"] = "global", +) -> Tensor: + beta2 = beta**2 + if average == "binary": + return _safe_divide((1 + beta2) * tp, (1 + beta2) * tp + beta2 * fn + fp) + elif average == "micro": + tp = tp.sum(dim=0 if multidim_average == "global" else 1) + fn = fn.sum(dim=0 if multidim_average == "global" else 1) + fp = fp.sum(dim=0 if multidim_average == "global" else 1) + return _safe_divide((1 + beta2) * tp, (1 + beta2) * tp + beta2 * fn + fp) + else: + fbeta_score = _safe_divide((1 + beta2) * tp, (1 + beta2) * tp + beta2 * fn + fp) + if average is None or average == "none": + return fbeta_score + if average == "weighted": + weights = tp + fn + else: + weights = torch.ones_like(fbeta_score) + return _safe_divide(weights * fbeta_score, weights.sum(-1, keepdim=True)).sum(-1) + + +def _binary_fbeta_score_arg_validation( + beta: float, + threshold: float = 0.5, + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, +) -> None: + if not (isinstance(beta, float) and beta > 0): + raise ValueError(f"Expected argument `beta` to be a float larger than 0, but got {beta}.") + _binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index) + + +def binary_fbeta_score( + preds: Tensor, + target: Tensor, + beta: float, + threshold: float = 0.5, + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes `F-score`_ metric for binary tasks: + + .. math:: + F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}} + {(\beta^2 * \text{precision}) + \text{recall}} + + Accepts the following input tensors: + + - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (int tensor): ``(N, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight + threshold: Threshold for transforming probability to binary {0,1} predictions + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. 
+ + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + If ``multidim_average`` is set to ``global``, the metric returns a scalar value. If ``multidim_average`` + is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample. + + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import binary_fbeta_score + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> binary_fbeta_score(preds, target, beta=2.0) + tensor(0.6667) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import binary_fbeta_score + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> binary_fbeta_score(preds, target, beta=2.0) + tensor(0.6667) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import binary_fbeta_score + >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = torch.tensor( + ... [ + ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], + ... ] + ... ) + >>> binary_fbeta_score(preds, target, beta=2.0, multidim_average='samplewise') + tensor([0.5882, 0.0000]) + """ + if validate_args: + _binary_fbeta_score_arg_validation(beta, threshold, multidim_average, ignore_index) + _binary_stat_scores_tensor_validation(preds, target, multidim_average, ignore_index) + preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index) + tp, fp, tn, fn = _binary_stat_scores_update(preds, target, multidim_average) + return _fbeta_reduce(tp, fp, tn, fn, beta, average="binary", multidim_average=multidim_average) + + +def _multiclass_fbeta_score_arg_validation( + beta: float, + num_classes: int, + top_k: int = 1, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, +) -> None: + if not (isinstance(beta, float) and beta > 0): + raise ValueError(f"Expected argument `beta` to be a float larger than 0, but got {beta}.") + _multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index) + + +def multiclass_fbeta_score( + preds: Tensor, + target: Tensor, + beta: float, + num_classes: int, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + top_k: int = 1, + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes `F-score`_ metric for multiclass tasks: + + .. math:: + F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}} + {(\beta^2 * \text{precision}) + \text{recall}} + + Accepts the following input tensors: + + - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point + we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into + an int tensor. + - ``target`` (int tensor): ``(N, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + beta: Weighting between precision and recall in calculation. 
Setting to 1 corresponds to equal weight + num_classes: Integer specifing the number of classes + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + top_k: + Number of highest probability or logit score predictions considered to find the correct label. + Only works when ``preds`` contain probabilities/logits. + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + The returned shape depends on the ``average`` and ``multidim_average`` arguments: + + - If ``multidim_average`` is set to ``global``: + + - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor + - If ``average=None/'none'``, the shape will be ``(C,)`` + + - If ``multidim_average`` is set to ``samplewise``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)`` + - If ``average=None/'none'``, the shape will be ``(N, C)`` + + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import multiclass_fbeta_score + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([2, 1, 0, 1]) + >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3) + tensor(0.7963) + >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3, average=None) + tensor([0.5556, 0.8333, 1.0000]) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import multiclass_fbeta_score + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([ + ... [0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13], + ... 
]) + >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3) + tensor(0.7963) + >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3, average=None) + tensor([0.5556, 0.8333, 1.0000]) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import multiclass_fbeta_score + >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3, multidim_average='samplewise') + tensor([0.4697, 0.2706]) + >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3, multidim_average='samplewise', average=None) + tensor([[0.9091, 0.0000, 0.5000], + [0.0000, 0.3571, 0.4545]]) + """ + if validate_args: + _multiclass_fbeta_score_arg_validation(beta, num_classes, top_k, average, multidim_average, ignore_index) + _multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index) + preds, target = _multiclass_stat_scores_format(preds, target, top_k) + tp, fp, tn, fn = _multiclass_stat_scores_update( + preds, target, num_classes, top_k, average, multidim_average, ignore_index + ) + return _fbeta_reduce(tp, fp, tn, fn, beta, average=average, multidim_average=multidim_average) + + +def _multilabel_fbeta_score_arg_validation( + beta: float, + num_labels: int, + threshold: float = 0.5, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, +) -> None: + if not (isinstance(beta, float) and beta > 0): + raise ValueError(f"Expected argument `beta` to be a float larger than 0, but got {beta}.") + _multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index) + + +def multilabel_fbeta_score( + preds: Tensor, + target: Tensor, + beta: float, + num_labels: int, + threshold: float = 0.5, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes `F-score`_ metric for multilabel tasks: + + .. math:: + F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}} + {(\beta^2 * \text{precision}) + \text{recall}} + + Accepts the following input tensors: + + - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (int tensor): ``(N, C, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight + num_labels: Integer specifing the number of labels + threshold: Threshold for transforming probability to binary (0,1) predictions + average: + Defines the reduction that is applied over labels. 
Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + The returned shape depends on the ``average`` and ``multidim_average`` arguments: + + - If ``multidim_average`` is set to ``global``: + + - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor + - If ``average=None/'none'``, the shape will be ``(C,)`` + + - If ``multidim_average`` is set to ``samplewise``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)`` + - If ``average=None/'none'``, the shape will be ``(N, C)`` + + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import multilabel_fbeta_score + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> multilabel_fbeta_score(preds, target, beta=2.0, num_labels=3) + tensor(0.6111) + >>> multilabel_fbeta_score(preds, target, beta=2.0, num_labels=3, average=None) + tensor([1.0000, 0.0000, 0.8333]) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import multilabel_fbeta_score + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> multilabel_fbeta_score(preds, target, beta=2.0, num_labels=3) + tensor(0.6111) + >>> multilabel_fbeta_score(preds, target, beta=2.0, num_labels=3, average=None) + tensor([1.0000, 0.0000, 0.8333]) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import multilabel_fbeta_score + >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = torch.tensor( + ... [ + ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], + ... ] + ... 
) + >>> multilabel_fbeta_score(preds, target, num_labels=3, beta=2.0, multidim_average='samplewise') + tensor([0.5556, 0.0000]) + >>> multilabel_fbeta_score(preds, target, num_labels=3, beta=2.0, multidim_average='samplewise', average=None) + tensor([[0.8333, 0.8333, 0.0000], + [0.0000, 0.0000, 0.0000]]) + """ + if validate_args: + _multilabel_fbeta_score_arg_validation(beta, num_labels, threshold, average, multidim_average, ignore_index) + _multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index) + preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index) + tp, fp, tn, fn = _multilabel_stat_scores_update(preds, target, multidim_average) + return _fbeta_reduce(tp, fp, tn, fn, beta, average=average, multidim_average=multidim_average) + + +def binary_f1_score( + preds: Tensor, + target: Tensor, + threshold: float = 0.5, + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes F-1 score for binary tasks: + + .. math:: + F_{1} = 2\frac{\text{precision} * \text{recall}}{(\text{precision}) + \text{recall}} + + Accepts the following input tensors: + + - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (int tensor): ``(N, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + threshold: Threshold for transforming probability to binary {0,1} predictions + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + If ``multidim_average`` is set to ``global``, the metric returns a scalar value. If ``multidim_average`` + is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample. + + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import binary_f1_score + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> binary_f1_score(preds, target) + tensor(0.6667) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import binary_f1_score + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> binary_f1_score(preds, target) + tensor(0.6667) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import binary_f1_score + >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = torch.tensor( + ... [ + ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], + ... ] + ... 
) + >>> binary_f1_score(preds, target, multidim_average='samplewise') + tensor([0.5000, 0.0000]) + """ + return binary_fbeta_score( + preds=preds, + target=target, + beta=1.0, + threshold=threshold, + multidim_average=multidim_average, + ignore_index=ignore_index, + validate_args=validate_args, + ) + + +def multiclass_f1_score( + preds: Tensor, + target: Tensor, + num_classes: int, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + top_k: int = 1, + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes F-1 score for multiclass tasks: + + .. math:: + F_{1} = 2\frac{\text{precision} * \text{recall}}{(\text{precision}) + \text{recall}} + + Accepts the following input tensors: + + - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point + we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into + an int tensor. + - ``target`` (int tensor): ``(N, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_classes: Integer specifing the number of classes + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + top_k: + Number of highest probability or logit score predictions considered to find the correct label. + Only works when ``preds`` contain probabilities/logits. + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + The returned shape depends on the ``average`` and ``multidim_average`` arguments: + + - If ``multidim_average`` is set to ``global``: + + - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor + - If ``average=None/'none'``, the shape will be ``(C,)`` + + - If ``multidim_average`` is set to ``samplewise``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)`` + - If ``average=None/'none'``, the shape will be ``(N, C)`` + + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import multiclass_f1_score + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([2, 1, 0, 1]) + >>> multiclass_f1_score(preds, target, num_classes=3) + tensor(0.7778) + >>> multiclass_f1_score(preds, target, num_classes=3, average=None) + tensor([0.6667, 0.6667, 1.0000]) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import multiclass_f1_score + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([ + ... [0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... 
[0.05, 0.82, 0.13], + ... ]) + >>> multiclass_f1_score(preds, target, num_classes=3) + tensor(0.7778) + >>> multiclass_f1_score(preds, target, num_classes=3, average=None) + tensor([0.6667, 0.6667, 1.0000]) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import multiclass_f1_score + >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> multiclass_f1_score(preds, target, num_classes=3, multidim_average='samplewise') + tensor([0.4333, 0.2667]) + >>> multiclass_f1_score(preds, target, num_classes=3, multidim_average='samplewise', average=None) + tensor([[0.8000, 0.0000, 0.5000], + [0.0000, 0.4000, 0.4000]]) + """ + return multiclass_fbeta_score( + preds=preds, + target=target, + beta=1.0, + num_classes=num_classes, + average=average, + top_k=top_k, + multidim_average=multidim_average, + ignore_index=ignore_index, + validate_args=validate_args, + ) + + +def multilabel_f1_score( + preds: Tensor, + target: Tensor, + num_labels: int, + threshold: float = 0.5, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes F-1 score for multilabel tasks: + + .. math:: + F_{1} = 2\frac{\text{precision} * \text{recall}}{(\text{precision}) + \text{recall}} + + Accepts the following input tensors: + + - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (int tensor): ``(N, C, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_labels: Integer specifing the number of labels + threshold: Threshold for transforming probability to binary (0,1) predictions + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. 
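# --- Editor's note: a minimal sketch, not part of the library source, showing
# that the F-1 functions in this file are thin wrappers around the F-beta
# functions with ``beta=1.0`` (see the function bodies above and below).
# Values reuse the multilabel docstring example nearby.
import torch
from torchmetrics.functional.classification import (
    multilabel_f1_score,
    multilabel_fbeta_score,
)

target = torch.tensor([[0, 1, 0], [1, 0, 1]])
preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
f1 = multilabel_f1_score(preds, target, num_labels=3)
fb1 = multilabel_fbeta_score(preds, target, num_labels=3, beta=1.0)
assert torch.allclose(f1, fb1)  # both tensor(0.5556)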
+ + Returns: + The returned shape depends on the ``average`` and ``multidim_average`` arguments: + + - If ``multidim_average`` is set to ``global``: + + - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor + - If ``average=None/'none'``, the shape will be ``(C,)`` + + - If ``multidim_average`` is set to ``samplewise``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)`` + - If ``average=None/'none'``, the shape will be ``(N, C)`` + + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import multilabel_f1_score + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> multilabel_f1_score(preds, target, num_labels=3) + tensor(0.5556) + >>> multilabel_f1_score(preds, target, num_labels=3, average=None) + tensor([1.0000, 0.0000, 0.6667]) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import multilabel_f1_score + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> multilabel_f1_score(preds, target, num_labels=3) + tensor(0.5556) + >>> multilabel_f1_score(preds, target, num_labels=3, average=None) + tensor([1.0000, 0.0000, 0.6667]) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import multilabel_f1_score + >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = torch.tensor( + ... [ + ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], + ... ] + ... ) + >>> multilabel_f1_score(preds, target, num_labels=3, multidim_average='samplewise') + tensor([0.4444, 0.0000]) + >>> multilabel_f1_score(preds, target, num_labels=3, multidim_average='samplewise', average=None) + tensor([[0.6667, 0.6667, 0.0000], + [0.0000, 0.0000, 0.0000]]) + """ + return multilabel_fbeta_score( + preds=preds, + target=target, + beta=1.0, + num_labels=num_labels, + threshold=threshold, + average=average, + multidim_average=multidim_average, + ignore_index=ignore_index, + validate_args=validate_args, + ) + + +def fbeta_score( + preds: Tensor, + target: Tensor, + task: Literal["binary", "multiclass", "multilabel"], + beta: float = 1.0, + threshold: float = 0.5, + num_classes: Optional[int] = None, + num_labels: Optional[int] = None, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro", + multidim_average: Optional[Literal["global", "samplewise"]] = "global", + top_k: Optional[int] = 1, + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes `F-score`_ metric: + + .. math:: + F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}} + {(\beta^2 * \text{precision}) + \text{recall}} + + This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the + ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of + :func:`binary_fbeta_score`, :func:`multiclass_fbeta_score` and :func:`multilabel_fbeta_score` for the specific + details of each argument influence and examples. 
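# --- Editor's note: an illustrative sketch, not library source. The wrapper
# below and the task-specific function are interchangeable, but their
# ``average`` defaults differ ('micro' for the wrapper, 'macro' for
# ``multiclass_fbeta_score``), so it is passed explicitly here.
import torch
from torchmetrics.functional.classification import fbeta_score, multiclass_fbeta_score

target = torch.tensor([0, 1, 2, 0, 1, 2])
preds = torch.tensor([0, 2, 1, 0, 0, 1])
a = fbeta_score(preds, target, task="multiclass", num_classes=3, beta=0.5)
b = multiclass_fbeta_score(preds, target, beta=0.5, num_classes=3, average="micro")
assert torch.allclose(a, b)  # both tensor(0.3333), as in the legacy example below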
+
+ Legacy Example:
+ >>> target = torch.tensor([0, 1, 2, 0, 1, 2])
+ >>> preds = torch.tensor([0, 2, 1, 0, 0, 1])
+ >>> fbeta_score(preds, target, task="multiclass", num_classes=3, beta=0.5)
+ tensor(0.3333)
+ """
+ assert multidim_average is not None
+ if task == "binary":
+ return binary_fbeta_score(preds, target, beta, threshold, multidim_average, ignore_index, validate_args)
+ if task == "multiclass":
+ assert isinstance(num_classes, int)
+ assert isinstance(top_k, int)
+ return multiclass_fbeta_score(
+ preds, target, beta, num_classes, average, top_k, multidim_average, ignore_index, validate_args
+ )
+ if task == "multilabel":
+ assert isinstance(num_labels, int)
+ return multilabel_fbeta_score(
+ preds, target, beta, num_labels, threshold, average, multidim_average, ignore_index, validate_args
+ )
+ raise ValueError(
+ f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
+ )
+
+
+def f1_score(
+ preds: Tensor,
+ target: Tensor,
+ task: Literal["binary", "multiclass", "multilabel"],
+ threshold: float = 0.5,
+ num_classes: Optional[int] = None,
+ num_labels: Optional[int] = None,
+ average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
+ multidim_average: Optional[Literal["global", "samplewise"]] = "global",
+ top_k: Optional[int] = 1,
+ ignore_index: Optional[int] = None,
+ validate_args: bool = True,
+) -> Tensor:
+ r"""Computes F-1 score:
+
+ .. math::
+ F_{1} = 2\frac{\text{precision} * \text{recall}}{\text{precision} + \text{recall}}
+
+ This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
+ ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
+ :func:`binary_f1_score`, :func:`multiclass_f1_score` and :func:`multilabel_f1_score` for the specific
+ details of each argument influence and examples.
+
+ Legacy Example:
+ >>> target = torch.tensor([0, 1, 2, 0, 1, 2])
+ >>> preds = torch.tensor([0, 2, 1, 0, 0, 1])
+ >>> f1_score(preds, target, task="multiclass", num_classes=3)
+ tensor(0.3333)
+ """
+ assert multidim_average is not None
+ if task == "binary":
+ return binary_f1_score(preds, target, threshold, multidim_average, ignore_index, validate_args)
+ if task == "multiclass":
+ assert isinstance(num_classes, int)
+ assert isinstance(top_k, int)
+ return multiclass_f1_score(
+ preds, target, num_classes, average, top_k, multidim_average, ignore_index, validate_args
+ )
+ if task == "multilabel":
+ assert isinstance(num_labels, int)
+ return multilabel_f1_score(
+ preds, target, num_labels, threshold, average, multidim_average, ignore_index, validate_args
+ )
+ raise ValueError(
+ f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
+ ) diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/hinge.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/hinge.py new file mode 100644 index 0000000000000000000000000000000000000000..adc70e9dd394a4d0616de572be21b8773045d325 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/hinge.py @@ -0,0 +1,282 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional, Tuple
+
+import torch
+from torch import Tensor, tensor
+from typing_extensions import Literal
+
+from torchmetrics.functional.classification.confusion_matrix import (
+ _binary_confusion_matrix_format,
+ _binary_confusion_matrix_tensor_validation,
+ _multiclass_confusion_matrix_format,
+ _multiclass_confusion_matrix_tensor_validation,
+)
+from torchmetrics.utilities.data import to_onehot
+
+
+def _hinge_loss_compute(measure: Tensor, total: Tensor) -> Tensor:
+ return measure / total
+
+
+def _binary_hinge_loss_arg_validation(squared: bool, ignore_index: Optional[int] = None) -> None:
+ if not isinstance(squared, bool):
+ raise ValueError(f"Expected argument `squared` to be a bool but got {squared}")
+ if ignore_index is not None and not isinstance(ignore_index, int):
+ raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
+
+
+def _binary_hinge_loss_tensor_validation(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> None:
+ _binary_confusion_matrix_tensor_validation(preds, target, ignore_index)
+ if not preds.is_floating_point():
+ raise ValueError(
+ "Expected argument `preds` to be floating tensor with probabilities/logits"
+ f" but got tensor with dtype {preds.dtype}"
+ )
+
+
+def _binary_hinge_loss_update(
+ preds: Tensor,
+ target: Tensor,
+ squared: bool,
+) -> Tuple[Tensor, Tensor]:
+
+ target = target.bool()
+ margin = torch.zeros_like(preds)
+ margin[target] = preds[target]
+ margin[~target] = -preds[~target]
+
+ measures = 1 - margin
+ measures = torch.clamp(measures, 0)
+
+ if squared:
+ measures = measures.pow(2)
+
+ total = tensor(target.shape[0], device=target.device)
+ return measures.sum(dim=0), total
+
+
+def binary_hinge_loss(
+ preds: Tensor,
+ target: Tensor,
+ squared: bool = False,
+ ignore_index: Optional[int] = None,
+ validate_args: bool = False,
+) -> Tensor:
+ r"""Computes the mean `Hinge loss`_ typically used for Support Vector Machines (SVMs) for binary tasks. It is
+ defined as:
+
+ .. math::
+ \text{Hinge loss} = \max(0, 1 - y \times \hat{y})
+
+ Where :math:`y \in \{-1, 1\}` is the target, and :math:`\hat{y} \in \mathbb{R}` is the prediction.
+
+ Accepts the following input tensors:
+
+ - ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
+ observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
+ sigmoid per element.
+ - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
+ only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class.
+
+ Additional dimension ``...`` will be flattened into the batch dimension.
+
+ Args:
+ preds: Tensor with predictions
+ target: Tensor with true labels
+ squared:
+ If True, this will compute the squared hinge loss. Otherwise, computes the regular hinge loss.
+ ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Example: + >>> from torchmetrics.functional.classification import binary_hinge_loss + >>> preds = torch.tensor([0.25, 0.25, 0.55, 0.75, 0.75]) + >>> target = torch.tensor([0, 0, 1, 1, 1]) + >>> binary_hinge_loss(preds, target) + tensor(0.6900) + >>> binary_hinge_loss(preds, target, squared=True) + tensor(0.6905) + """ + if validate_args: + _binary_hinge_loss_arg_validation(squared, ignore_index) + _binary_hinge_loss_tensor_validation(preds, target, ignore_index) + preds, target = _binary_confusion_matrix_format( + preds, target, threshold=0.0, ignore_index=ignore_index, convert_to_labels=False + ) + measures, total = _binary_hinge_loss_update(preds, target, squared) + return _hinge_loss_compute(measures, total) + + +def _multiclass_hinge_loss_arg_validation( + num_classes: int, + squared: bool = False, + multiclass_mode: Literal["crammer-singer", "one-vs-all"] = "crammer-singer", + ignore_index: Optional[int] = None, +) -> None: + _binary_hinge_loss_arg_validation(squared, ignore_index) + if not isinstance(num_classes, int) or num_classes < 2: + raise ValueError(f"Expected argument `num_classes` to be an integer larger than 1, but got {num_classes}") + allowed_mm = ("crammer-singer", "one-vs-all") + if multiclass_mode not in allowed_mm: + raise ValueError(f"Expected argument `multiclass_mode` to be one of {allowed_mm}, but got {multiclass_mode}.") + + +def _multiclass_hinge_loss_tensor_validation( + preds: Tensor, target: Tensor, num_classes: int, ignore_index: Optional[int] = None +) -> None: + _multiclass_confusion_matrix_tensor_validation(preds, target, num_classes, ignore_index) + if not preds.is_floating_point(): + raise ValueError( + "Expected argument `preds` to be floating tensor with probabilities/logits" + f" but got tensor with dtype {preds.dtype}" + ) + + +def _multiclass_hinge_loss_update( + preds: Tensor, + target: Tensor, + squared: bool, + multiclass_mode: Literal["crammer-singer", "one-vs-all"] = "crammer-singer", +) -> Tuple[Tensor, Tensor]: + if not torch.all((0 <= preds) * (preds <= 1)): + preds = preds.softmax(1) + + target = to_onehot(target, max(2, preds.shape[1])).bool() + if multiclass_mode == "crammer-singer": + margin = preds[target] + margin -= torch.max(preds[~target].view(preds.shape[0], -1), dim=1)[0] + else: + target = target.bool() + margin = torch.zeros_like(preds) + margin[target] = preds[target] + margin[~target] = -preds[~target] + + measures = 1 - margin + measures = torch.clamp(measures, 0) + + if squared: + measures = measures.pow(2) + + total = tensor(target.shape[0], device=target.device) + return measures.sum(dim=0), total + + +def multiclass_hinge_loss( + preds: Tensor, + target: Tensor, + num_classes: int, + squared: bool = False, + multiclass_mode: Literal["crammer-singer", "one-vs-all"] = "crammer-singer", + ignore_index: Optional[int] = None, + validate_args: bool = False, +) -> Tensor: + r"""Computes the mean `Hinge loss`_ typically used for Support Vector Machines (SVMs) for multiclass tasks. + + The metric can be computed in two ways. Either, the definition by Crammer and Singer is used: + + .. 
math:: + \text{Hinge loss} = \max\left(0, 1 - \hat{y}_y + \max_{i \ne y} (\hat{y}_i)\right) + + Where :math:`y \in {0, ..., \mathrm{C}}` is the target class (where :math:`\mathrm{C}` is the number of classes), + and :math:`\hat{y} \in \mathbb{R}^\mathrm{C}` is the predicted output per class. Alternatively, the metric can + also be computed in one-vs-all approach, where each class is valued against all other classes in a binary fashion. + + Accepts the following input tensors: + + - ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each + observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply + softmax per sample. + - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore + only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified). + + Additional dimension ``...`` will be flattened into the batch dimension. + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_classes: Integer specifing the number of classes + squared: + If True, this will compute the squared hinge loss. Otherwise, computes the regular hinge loss. + multiclass_mode: + Determines how to compute the metric + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Example: + >>> from torchmetrics.functional.classification import multiclass_hinge_loss + >>> preds = torch.tensor([[0.25, 0.20, 0.55], + ... [0.55, 0.05, 0.40], + ... [0.10, 0.30, 0.60], + ... [0.90, 0.05, 0.05]]) + >>> target = torch.tensor([0, 1, 2, 0]) + >>> multiclass_hinge_loss(preds, target, num_classes=3) + tensor(0.9125) + >>> multiclass_hinge_loss(preds, target, num_classes=3, squared=True) + tensor(1.1131) + >>> multiclass_hinge_loss(preds, target, num_classes=3, multiclass_mode='one-vs-all') + tensor([0.8750, 1.1250, 1.1000]) + """ + if validate_args: + _multiclass_hinge_loss_arg_validation(num_classes, squared, multiclass_mode, ignore_index) + _multiclass_hinge_loss_tensor_validation(preds, target, num_classes, ignore_index) + preds, target = _multiclass_confusion_matrix_format(preds, target, ignore_index, convert_to_labels=False) + measures, total = _multiclass_hinge_loss_update(preds, target, squared, multiclass_mode) + return _hinge_loss_compute(measures, total) + + +def hinge_loss( + preds: Tensor, + target: Tensor, + task: Literal["binary", "multiclass"], + num_classes: Optional[int] = None, + squared: bool = False, + multiclass_mode: Literal["crammer-singer", "one-vs-all"] = "crammer-singer", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes the mean `Hinge loss`_ typically used for Support Vector Machines (SVMs). + + This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the + ``task`` argument to either ``'binary'`` or ``'multiclass'``. See the documentation of + :func:`binary_hinge_loss` and :func:`multiclass_hinge_loss` for the specific details of + each argument influence and examples. 
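# --- Editor's note: a hedged sketch, not library source, of the margin
# computations behind the hinge losses defined above. For binary targets the
# margin is +pred on positives and -pred on negatives; in 'crammer-singer'
# mode it is the true-class score minus the best competing score. In both
# cases the loss is mean(clamp(1 - margin, min=0)).
import torch

# Binary case, reusing the binary_hinge_loss docstring values.
preds = torch.tensor([0.25, 0.25, 0.55, 0.75, 0.75])
target = torch.tensor([0, 0, 1, 1, 1]).bool()
margin = torch.where(target, preds, -preds)
print(torch.clamp(1 - margin, min=0).mean())  # tensor(0.6900)

# Crammer-Singer case, reusing the multiclass_hinge_loss docstring values.
mc_preds = torch.tensor([[0.25, 0.20, 0.55],
                         [0.55, 0.05, 0.40],
                         [0.10, 0.30, 0.60],
                         [0.90, 0.05, 0.05]])
mc_target = torch.tensor([0, 1, 2, 0])
true_score = mc_preds.gather(1, mc_target.unsqueeze(1)).squeeze(1)
best_other = mc_preds.scatter(1, mc_target.unsqueeze(1), float("-inf")).max(dim=1).values
print(torch.clamp(1 - (true_score - best_other), min=0).mean())  # tensor(0.9125)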
+
+ Legacy Example:
+ >>> import torch
+ >>> target = torch.tensor([0, 1, 1])
+ >>> preds = torch.tensor([0.5, 0.7, 0.1])
+ >>> hinge_loss(preds, target, task="binary")
+ tensor(0.9000)
+
+ >>> target = torch.tensor([0, 1, 2])
+ >>> preds = torch.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
+ >>> hinge_loss(preds, target, task="multiclass", num_classes=3)
+ tensor(1.5551)
+
+ >>> target = torch.tensor([0, 1, 2])
+ >>> preds = torch.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
+ >>> hinge_loss(preds, target, task="multiclass", num_classes=3, multiclass_mode="one-vs-all")
+ tensor([1.3743, 1.1945, 1.2359])
+ """
+ if task == "binary":
+ return binary_hinge_loss(preds, target, squared, ignore_index, validate_args)
+ if task == "multiclass":
+ assert isinstance(num_classes, int)
+ return multiclass_hinge_loss(preds, target, num_classes, squared, multiclass_mode, ignore_index, validate_args)
+ raise ValueError(f"Expected argument `task` to either be `'binary'` or `'multiclass'` but got {task}") diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/matthews_corrcoef.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/matthews_corrcoef.py new file mode 100644 index 0000000000000000000000000000000000000000..7535b56d049838a20ba291366c6546b33aeb5631 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/matthews_corrcoef.py @@ -0,0 +1,246 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
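# --- Editor's note: a hedged usage sketch, not library source, of the
# ``hinge_loss`` wrapper defined in hinge.py above: ``task`` merely selects
# the underlying task-specific function, so these two calls agree.
import torch
from torchmetrics.functional.classification import binary_hinge_loss, hinge_loss

preds = torch.tensor([0.5, 0.7, 0.1])
target = torch.tensor([0, 1, 1])
assert torch.allclose(
    hinge_loss(preds, target, task="binary"),
    binary_hinge_loss(preds, target),
)  # both tensor(0.9000)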
+from typing import Optional
+
+import torch
+from torch import Tensor
+from typing_extensions import Literal
+
+from torchmetrics.functional.classification.confusion_matrix import (
+ _binary_confusion_matrix_arg_validation,
+ _binary_confusion_matrix_format,
+ _binary_confusion_matrix_tensor_validation,
+ _binary_confusion_matrix_update,
+ _multiclass_confusion_matrix_arg_validation,
+ _multiclass_confusion_matrix_format,
+ _multiclass_confusion_matrix_tensor_validation,
+ _multiclass_confusion_matrix_update,
+ _multilabel_confusion_matrix_arg_validation,
+ _multilabel_confusion_matrix_format,
+ _multilabel_confusion_matrix_tensor_validation,
+ _multilabel_confusion_matrix_update,
+)
+
+
+def _matthews_corrcoef_reduce(confmat: Tensor) -> Tensor:
+ """Reduce an un-normalized confusion matrix of shape (n_classes, n_classes) into the Matthews correlation
+ coefficient score."""
+ # convert multilabel into binary
+ confmat = confmat.sum(0) if confmat.ndim == 3 else confmat
+
+ tk = confmat.sum(dim=-1).float()
+ pk = confmat.sum(dim=-2).float()
+ c = torch.trace(confmat).float()
+ s = confmat.sum().float()
+
+ cov_ytyp = c * s - (tk * pk).sum()
+ cov_ypyp = s**2 - (pk * pk).sum()
+ cov_ytyt = s**2 - (tk * tk).sum()
+
+ denom = cov_ypyp * cov_ytyt
+ if denom == 0:
+ return torch.tensor(0, dtype=confmat.dtype, device=confmat.device)
+ else:
+ return cov_ytyp / torch.sqrt(denom)
+
+
+def binary_matthews_corrcoef(
+ preds: Tensor,
+ target: Tensor,
+ threshold: float = 0.5,
+ ignore_index: Optional[int] = None,
+ validate_args: bool = True,
+) -> Tensor:
+ r"""Calculates `Matthews correlation coefficient`_ for binary tasks. This metric measures the general
+ correlation or quality of a classification.
+
+ Accepts the following input tensors:
+
+ - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
+ [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
+ we convert to int tensor with thresholding using the value in ``threshold``.
+ - ``target`` (int tensor): ``(N, ...)``
+
+ Additional dimension ``...`` will be flattened into the batch dimension.
+
+ Args:
+ threshold: Threshold for transforming probability to binary (0,1) predictions
+ ignore_index:
+ Specifies a target value that is ignored and does not contribute to the metric calculation
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+ Set to ``False`` for faster computations.
+ kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
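# --- Editor's note: a hedged numeric sketch, not library source, of the
# reduction in ``_matthews_corrcoef_reduce`` above. With t = row sums,
# p = column sums, c = trace and s = grand total of the confusion matrix:
# MCC = (c*s - t.p) / sqrt((s^2 - p.p) * (s^2 - t.t)).
import torch

confmat = torch.tensor([[2.0, 0.0], [1.0, 1.0]])  # confusion matrix of the binary example below
tk, pk = confmat.sum(dim=-1), confmat.sum(dim=-2)
c, s = torch.trace(confmat), confmat.sum()
mcc = (c * s - (tk * pk).sum()) / torch.sqrt((s**2 - (pk * pk).sum()) * (s**2 - (tk * tk).sum()))
print(mcc)  # tensor(0.5774)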
+ + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import binary_matthews_corrcoef + >>> target = torch.tensor([1, 1, 0, 0]) + >>> preds = torch.tensor([0, 1, 0, 0]) + >>> binary_matthews_corrcoef(preds, target) + tensor(0.5774) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import binary_matthews_corrcoef + >>> target = torch.tensor([1, 1, 0, 0]) + >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01]) + >>> binary_matthews_corrcoef(preds, target) + tensor(0.5774) + """ + if validate_args: + _binary_confusion_matrix_arg_validation(threshold, ignore_index, normalize=None) + _binary_confusion_matrix_tensor_validation(preds, target, ignore_index) + preds, target = _binary_confusion_matrix_format(preds, target, threshold, ignore_index) + confmat = _binary_confusion_matrix_update(preds, target) + return _matthews_corrcoef_reduce(confmat) + + +def multiclass_matthews_corrcoef( + preds: Tensor, + target: Tensor, + num_classes: int, + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Calculates `Matthews correlation coefficient`_ for multiclass tasks. This metric measures the general + correlation or quality of a classification. + + Accepts the following input tensors: + + - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point + we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into + an int tensor. + - ``target`` (int tensor): ``(N, ...)`` + + Additional dimension ``...`` will be flattened into the batch dimension. + + Args: + num_classes: Integer specifing the number of classes + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. + + Example (pred is integer tensor): + >>> from torchmetrics.functional.classification import multiclass_matthews_corrcoef + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([2, 1, 0, 1]) + >>> multiclass_matthews_corrcoef(preds, target, num_classes=3) + tensor(0.7000) + + Example (pred is float tensor): + >>> from torchmetrics.functional.classification import multiclass_matthews_corrcoef + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([ + ... [0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13], + ... ]) + >>> multiclass_matthews_corrcoef(preds, target, num_classes=3) + tensor(0.7000) + """ + if validate_args: + _multiclass_confusion_matrix_arg_validation(num_classes, ignore_index, normalize=None) + _multiclass_confusion_matrix_tensor_validation(preds, target, num_classes, ignore_index) + preds, target = _multiclass_confusion_matrix_format(preds, target, ignore_index) + confmat = _multiclass_confusion_matrix_update(preds, target, num_classes) + return _matthews_corrcoef_reduce(confmat) + + +def multilabel_matthews_corrcoef( + preds: Tensor, + target: Tensor, + num_labels: int, + threshold: float = 0.5, + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Calculates `Matthews correlation coefficient`_ for multilabel tasks. This metric measures the general + correlation or quality of a classification. 
+
+ Accepts the following input tensors:
+
+ - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
+ [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
+ we convert to int tensor with thresholding using the value in ``threshold``.
+ - ``target`` (int tensor): ``(N, C, ...)``
+
+ Additional dimension ``...`` will be flattened into the batch dimension.
+
+ Args:
+ num_labels: Integer specifying the number of labels
+ threshold: Threshold for transforming probability to binary (0,1) predictions
+ ignore_index:
+ Specifies a target value that is ignored and does not contribute to the metric calculation
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+ Set to ``False`` for faster computations.
+ kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
+
+ Example (preds is int tensor):
+ >>> from torchmetrics.functional.classification import multilabel_matthews_corrcoef
+ >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
+ >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
+ >>> multilabel_matthews_corrcoef(preds, target, num_labels=3)
+ tensor(0.3333)
+
+ Example (preds is float tensor):
+ >>> from torchmetrics.functional.classification import multilabel_matthews_corrcoef
+ >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
+ >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
+ >>> multilabel_matthews_corrcoef(preds, target, num_labels=3)
+ tensor(0.3333)
+ """
+ if validate_args:
+ _multilabel_confusion_matrix_arg_validation(num_labels, threshold, ignore_index, normalize=None)
+ _multilabel_confusion_matrix_tensor_validation(preds, target, num_labels, ignore_index)
+ preds, target = _multilabel_confusion_matrix_format(preds, target, num_labels, threshold, ignore_index)
+ confmat = _multilabel_confusion_matrix_update(preds, target, num_labels)
+ return _matthews_corrcoef_reduce(confmat)
+
+
+def matthews_corrcoef(
+ preds: Tensor,
+ target: Tensor,
+ task: Optional[Literal["binary", "multiclass", "multilabel"]] = None,
+ threshold: float = 0.5,
+ num_classes: Optional[int] = None,
+ num_labels: Optional[int] = None,
+ ignore_index: Optional[int] = None,
+ validate_args: bool = True,
+) -> Tensor:
+ r"""Calculates `Matthews correlation coefficient`_. This metric measures the general correlation or quality of
+ a classification.
+
+ This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
+ ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
+ :func:`binary_matthews_corrcoef`, :func:`multiclass_matthews_corrcoef` and :func:`multilabel_matthews_corrcoef` for
+ the specific details of each argument influence and examples.
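# --- Editor's note: a hedged sketch, not library source, of the dispatch in
# the ``matthews_corrcoef`` wrapper above: ``task="binary"`` routes to
# ``binary_matthews_corrcoef``, so the two calls below return the same value.
import torch
from torchmetrics.functional.classification import (
    binary_matthews_corrcoef,
    matthews_corrcoef,
)

target = torch.tensor([1, 1, 0, 0])
preds = torch.tensor([0, 1, 0, 0])
assert torch.allclose(
    matthews_corrcoef(preds, target, task="binary"),
    binary_matthews_corrcoef(preds, target),
)  # both tensor(0.5774)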
+ + Legacy Example: + >>> target = torch.tensor([1, 1, 0, 0]) + >>> preds = torch.tensor([0, 1, 0, 0]) + >>> matthews_corrcoef(preds, target, task="multiclass", num_classes=2) + tensor(0.5774) + """ + if task == "binary": + return binary_matthews_corrcoef(preds, target, threshold, ignore_index, validate_args) + if task == "multiclass": + assert isinstance(num_classes, int) + return multiclass_matthews_corrcoef(preds, target, num_classes, ignore_index, validate_args) + if task == "multilabel": + assert isinstance(num_labels, int) + return multilabel_matthews_corrcoef(preds, target, num_labels, threshold, ignore_index, validate_args) + raise ValueError( + f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}" + ) diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/precision_recall.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/precision_recall.py new file mode 100644 index 0000000000000000000000000000000000000000..d6f5ccf38a6e1b7431c962391520952acaff9ebe --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/precision_recall.py @@ -0,0 +1,738 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
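# --- Editor's note: a hedged sketch, not library source, of the reduction
# implemented by ``_precision_recall_reduce`` just below: precision divides
# tp by tp + fp (recall would divide by tp + fn); 'micro' sums the counts
# before dividing, while 'macro' averages the per-class scores. Counts mirror
# the multiclass_precision docstring example further down.
import torch

tp = torch.tensor([1.0, 1.0, 1.0])
fp = torch.tensor([0.0, 1.0, 0.0])
per_class = tp / (tp + fp)                 # average=None: tensor([1.0000, 0.5000, 1.0000])
macro = per_class.mean()                   # tensor(0.8333)
micro = tp.sum() / (tp.sum() + fp.sum())   # tensor(0.7500)
print(per_class, macro, micro)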
+from typing import Optional + +import torch +from torch import Tensor +from typing_extensions import Literal + +from torchmetrics.functional.classification.stat_scores import ( + _binary_stat_scores_arg_validation, + _binary_stat_scores_format, + _binary_stat_scores_tensor_validation, + _binary_stat_scores_update, + _multiclass_stat_scores_arg_validation, + _multiclass_stat_scores_format, + _multiclass_stat_scores_tensor_validation, + _multiclass_stat_scores_update, + _multilabel_stat_scores_arg_validation, + _multilabel_stat_scores_format, + _multilabel_stat_scores_tensor_validation, + _multilabel_stat_scores_update, +) +from torchmetrics.utilities.compute import _safe_divide + + +def _precision_recall_reduce( + stat: Literal["precision", "recall"], + tp: Tensor, + fp: Tensor, + tn: Tensor, + fn: Tensor, + average: Optional[Literal["binary", "micro", "macro", "weighted", "none"]], + multidim_average: Literal["global", "samplewise"] = "global", +) -> Tensor: + different_stat = fp if stat == "precision" else fn # this is what differs between the two scores + if average == "binary": + return _safe_divide(tp, tp + different_stat) + elif average == "micro": + tp = tp.sum(dim=0 if multidim_average == "global" else 1) + fn = fn.sum(dim=0 if multidim_average == "global" else 1) + different_stat = different_stat.sum(dim=0 if multidim_average == "global" else 1) + return _safe_divide(tp, tp + different_stat) + else: + score = _safe_divide(tp, tp + different_stat) + if average is None or average == "none": + return score + if average == "weighted": + weights = tp + fn + else: + weights = torch.ones_like(score) + return _safe_divide(weights * score, weights.sum(-1, keepdim=True)).sum(-1) + + +def binary_precision( + preds: Tensor, + target: Tensor, + threshold: float = 0.5, + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes `Precision`_ for binary tasks: + + .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}} + + Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and + false positives respecitively. + + Accepts the following input tensors: + + - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (int tensor): ``(N, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + threshold: Threshold for transforming probability to binary {0,1} predictions + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + If ``multidim_average`` is set to ``global``, the metric returns a scalar value. 
If ``multidim_average`` + is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample. + + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import binary_precision + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> binary_precision(preds, target) + tensor(0.6667) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import binary_precision + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> binary_precision(preds, target) + tensor(0.6667) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import binary_precision + >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = torch.tensor( + ... [ + ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], + ... ] + ... ) + >>> binary_precision(preds, target, multidim_average='samplewise') + tensor([0.4000, 0.0000]) + """ + if validate_args: + _binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index) + _binary_stat_scores_tensor_validation(preds, target, multidim_average, ignore_index) + preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index) + tp, fp, tn, fn = _binary_stat_scores_update(preds, target, multidim_average) + return _precision_recall_reduce("precision", tp, fp, tn, fn, average="binary", multidim_average=multidim_average) + + +def multiclass_precision( + preds: Tensor, + target: Tensor, + num_classes: int, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + top_k: int = 1, + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes `Precision`_ for multiclass tasks. + + .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}} + + Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and + false positives respecitively. + + Accepts the following input tensors: + + - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point + we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into + an int tensor. + - ``target`` (int tensor): ``(N, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_classes: Integer specifing the number of classes + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + + top_k: + Number of highest probability or logit score predictions considered to find the correct label. + Only works when ``preds`` contain probabilities/logits. + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. 
+ + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + The returned shape depends on the ``average`` and ``multidim_average`` arguments: + + - If ``multidim_average`` is set to ``global``: + + - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor + - If ``average=None/'none'``, the shape will be ``(C,)`` + + - If ``multidim_average`` is set to ``samplewise``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)`` + - If ``average=None/'none'``, the shape will be ``(N, C)`` + + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import multiclass_precision + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([2, 1, 0, 1]) + >>> multiclass_precision(preds, target, num_classes=3) + tensor(0.8333) + >>> multiclass_precision(preds, target, num_classes=3, average=None) + tensor([1.0000, 0.5000, 1.0000]) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import multiclass_precision + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([ + ... [0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13], + ... ]) + >>> multiclass_precision(preds, target, num_classes=3) + tensor(0.8333) + >>> multiclass_precision(preds, target, num_classes=3, average=None) + tensor([1.0000, 0.5000, 1.0000]) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import multiclass_precision + >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> multiclass_precision(preds, target, num_classes=3, multidim_average='samplewise') + tensor([0.3889, 0.2778]) + >>> multiclass_precision(preds, target, num_classes=3, multidim_average='samplewise', average=None) + tensor([[0.6667, 0.0000, 0.5000], + [0.0000, 0.5000, 0.3333]]) + """ + if validate_args: + _multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index) + _multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index) + preds, target = _multiclass_stat_scores_format(preds, target, top_k) + tp, fp, tn, fn = _multiclass_stat_scores_update( + preds, target, num_classes, top_k, average, multidim_average, ignore_index + ) + return _precision_recall_reduce("precision", tp, fp, tn, fn, average=average, multidim_average=multidim_average) + + +def multilabel_precision( + preds: Tensor, + target: Tensor, + num_labels: int, + threshold: float = 0.5, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes `Precision`_ for multilabel tasks. + + .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}} + + Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and + false positives respecitively. + + Accepts the following input tensors: + + - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. 
Addtionally, + we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (int tensor): ``(N, C, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_labels: Integer specifing the number of labels + threshold: Threshold for transforming probability to binary (0,1) predictions + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + The returned shape depends on the ``average`` and ``multidim_average`` arguments: + + - If ``multidim_average`` is set to ``global``: + + - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor + - If ``average=None/'none'``, the shape will be ``(C,)`` + + - If ``multidim_average`` is set to ``samplewise``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)`` + - If ``average=None/'none'``, the shape will be ``(N, C)`` + + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import multilabel_precision + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> multilabel_precision(preds, target, num_labels=3) + tensor(0.5000) + >>> multilabel_precision(preds, target, num_labels=3, average=None) + tensor([1.0000, 0.0000, 0.5000]) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import multilabel_precision + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> multilabel_precision(preds, target, num_labels=3) + tensor(0.5000) + >>> multilabel_precision(preds, target, num_labels=3, average=None) + tensor([1.0000, 0.0000, 0.5000]) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import multilabel_precision + >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = torch.tensor( + ... [ + ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], + ... ] + ... 
) + >>> multilabel_precision(preds, target, num_labels=3, multidim_average='samplewise') + tensor([0.3333, 0.0000]) + >>> multilabel_precision(preds, target, num_labels=3, multidim_average='samplewise', average=None) + tensor([[0.5000, 0.5000, 0.0000], + [0.0000, 0.0000, 0.0000]]) + """ + if validate_args: + _multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index) + _multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index) + preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index) + tp, fp, tn, fn = _multilabel_stat_scores_update(preds, target, multidim_average) + return _precision_recall_reduce("precision", tp, fp, tn, fn, average=average, multidim_average=multidim_average) + + +def binary_recall( + preds: Tensor, + target: Tensor, + threshold: float = 0.5, + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes `Recall`_ for binary tasks: + + .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}} + + Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and + false negatives respecitively. + + Accepts the following input tensors: + + - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (int tensor): ``(N, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + threshold: Threshold for transforming probability to binary {0,1} predictions + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + If ``multidim_average`` is set to ``global``, the metric returns a scalar value. If ``multidim_average`` + is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample. + + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import binary_recall + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> binary_recall(preds, target) + tensor(0.6667) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import binary_recall + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> binary_recall(preds, target) + tensor(0.6667) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import binary_recall + >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = torch.tensor( + ... [ + ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], + ... ] + ... 
) + >>> binary_recall(preds, target, multidim_average='samplewise') + tensor([0.6667, 0.0000]) + """ + if validate_args: + _binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index) + _binary_stat_scores_tensor_validation(preds, target, multidim_average, ignore_index) + preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index) + tp, fp, tn, fn = _binary_stat_scores_update(preds, target, multidim_average) + return _precision_recall_reduce("recall", tp, fp, tn, fn, average="binary", multidim_average=multidim_average) + + +def multiclass_recall( + preds: Tensor, + target: Tensor, + num_classes: int, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + top_k: int = 1, + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes `Recall`_ for multiclass tasks: + + .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}} + + Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and + false negatives respecitively. + + Accepts the following input tensors: + + - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point + we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into + an int tensor. + - ``target`` (int tensor): ``(N, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_classes: Integer specifing the number of classes + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + + top_k: + Number of highest probability or logit score predictions considered to find the correct label. + Only works when ``preds`` contain probabilities/logits. + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. 
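# --- Editor's note: a hedged sketch, not library source, contrasting
# ``multidim_average='global'`` (extra dims flattened into the batch) with
# ``'samplewise'`` (one statistic per sample along dim 0). Values reuse the
# binary_recall multidim docstring example above.
import torch
from torchmetrics.functional.classification import binary_recall

target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
preds = torch.tensor(
    [
        [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
    ]
)
print(binary_recall(preds, target, multidim_average="samplewise"))  # tensor([0.6667, 0.0000])
print(binary_recall(preds, target))  # global: tensor(0.3333)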
+ + Returns: + The returned shape depends on the ``average`` and ``multidim_average`` arguments: + + - If ``multidim_average`` is set to ``global``: + + - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor + - If ``average=None/'none'``, the shape will be ``(C,)`` + + - If ``multidim_average`` is set to ``samplewise``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)`` + - If ``average=None/'none'``, the shape will be ``(N, C)`` + + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import multiclass_recall + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([2, 1, 0, 1]) + >>> multiclass_recall(preds, target, num_classes=3) + tensor(0.8333) + >>> multiclass_recall(preds, target, num_classes=3, average=None) + tensor([0.5000, 1.0000, 1.0000]) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import multiclass_recall + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([ + ... [0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13], + ... ]) + >>> multiclass_recall(preds, target, num_classes=3) + tensor(0.8333) + >>> multiclass_recall(preds, target, num_classes=3, average=None) + tensor([0.5000, 1.0000, 1.0000]) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import multiclass_recall + >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> multiclass_recall(preds, target, num_classes=3, multidim_average='samplewise') + tensor([0.5000, 0.2778]) + >>> multiclass_recall(preds, target, num_classes=3, multidim_average='samplewise', average=None) + tensor([[1.0000, 0.0000, 0.5000], + [0.0000, 0.3333, 0.5000]]) + """ + if validate_args: + _multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index) + _multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index) + preds, target = _multiclass_stat_scores_format(preds, target, top_k) + tp, fp, tn, fn = _multiclass_stat_scores_update( + preds, target, num_classes, top_k, average, multidim_average, ignore_index + ) + return _precision_recall_reduce("recall", tp, fp, tn, fn, average=average, multidim_average=multidim_average) + + +def multilabel_recall( + preds: Tensor, + target: Tensor, + num_labels: int, + threshold: float = 0.5, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes `Recall`_ for multilabel tasks: + + .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}} + + Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and + false negatives respecitively. + + Accepts the following input tensors: + + - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + we convert to int tensor with thresholding using the value in ``threshold``. 
+ - ``target`` (int tensor): ``(N, C, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_labels: Integer specifing the number of labels + threshold: Threshold for transforming probability to binary (0,1) predictions + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + The returned shape depends on the ``average`` and ``multidim_average`` arguments: + + - If ``multidim_average`` is set to ``global``: + + - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor + - If ``average=None/'none'``, the shape will be ``(C,)`` + + - If ``multidim_average`` is set to ``samplewise``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)`` + - If ``average=None/'none'``, the shape will be ``(N, C)`` + + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import multilabel_recall + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]]) + >>> multilabel_recall(preds, target, num_labels=3) + tensor(0.6667) + >>> multilabel_recall(preds, target, num_labels=3, average=None) + tensor([1., 0., 1.]) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import multilabel_recall + >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]]) + >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]]) + >>> multilabel_recall(preds, target, num_labels=3) + tensor(0.6667) + >>> multilabel_recall(preds, target, num_labels=3, average=None) + tensor([1., 0., 1.]) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import multilabel_recall + >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = torch.tensor( + ... [ + ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], + ... ] + ... 
+        >>> multilabel_recall(preds, target, num_labels=3, multidim_average='samplewise')
+        tensor([0.6667, 0.0000])
+        >>> multilabel_recall(preds, target, num_labels=3, multidim_average='samplewise', average=None)
+        tensor([[1., 1., 0.],
+                [0., 0., 0.]])
+    """
+    if validate_args:
+        _multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index)
+        _multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index)
+    preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index)
+    tp, fp, tn, fn = _multilabel_stat_scores_update(preds, target, multidim_average)
+    return _precision_recall_reduce("recall", tp, fp, tn, fn, average=average, multidim_average=multidim_average)
+
+
+def precision(
+    preds: Tensor,
+    target: Tensor,
+    task: Literal["binary", "multiclass", "multilabel"],
+    threshold: float = 0.5,
+    num_classes: Optional[int] = None,
+    num_labels: Optional[int] = None,
+    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
+    multidim_average: Optional[Literal["global", "samplewise"]] = "global",
+    top_k: Optional[int] = 1,
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tensor:
+    r"""Computes `Precision`_:
+
+    .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
+
+    Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
+    false positives respectively.
+
+    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
+    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
+    :func:`binary_precision`, :func:`multiclass_precision` and :func:`multilabel_precision` for the specific details
+    of how each argument influences the metric, along with usage examples.
+
+    Legacy Example:
+        >>> preds = torch.tensor([2, 0, 2, 1])
+        >>> target = torch.tensor([1, 1, 2, 0])
+        >>> precision(preds, target, task="multiclass", average='macro', num_classes=3)
+        tensor(0.1667)
+        >>> precision(preds, target, task="multiclass", average='micro', num_classes=3)
+        tensor(0.2500)
+    """
+    assert multidim_average is not None
+    if task == "binary":
+        return binary_precision(preds, target, threshold, multidim_average, ignore_index, validate_args)
+    if task == "multiclass":
+        assert isinstance(num_classes, int)
+        assert isinstance(top_k, int)
+        return multiclass_precision(
+            preds, target, num_classes, average, top_k, multidim_average, ignore_index, validate_args
+        )
+    if task == "multilabel":
+        assert isinstance(num_labels, int)
+        return multilabel_precision(
+            preds, target, num_labels, threshold, average, multidim_average, ignore_index, validate_args
+        )
+    raise ValueError(
+        f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
+    )
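+
+
+# Hedged usage sketch (editorial, values assumed): the task wrappers simply
+# dispatch on `task`, so with the default `top_k` and `multidim_average` these
+# two calls are equivalent:
+#
+#     precision(preds, target, task="multiclass", num_classes=3, average="macro")
+#     multiclass_precision(preds, target, num_classes=3, average="macro")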
+
+
+def recall(
+    preds: Tensor,
+    target: Tensor,
+    task: Literal["binary", "multiclass", "multilabel"],
+    threshold: float = 0.5,
+    num_classes: Optional[int] = None,
+    num_labels: Optional[int] = None,
+    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
+    multidim_average: Optional[Literal["global", "samplewise"]] = "global",
+    top_k: Optional[int] = 1,
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tensor:
+    r"""Computes `Recall`_:
+
+    .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
+
+    Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
+    false negatives respectively.
+
+    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
+    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
+    :func:`binary_recall`, :func:`multiclass_recall` and :func:`multilabel_recall` for the specific details of
+    how each argument influences the metric, along with usage examples.
+
+    Legacy Example:
+        >>> preds = torch.tensor([2, 0, 2, 1])
+        >>> target = torch.tensor([1, 1, 2, 0])
+        >>> recall(preds, target, task="multiclass", average='macro', num_classes=3)
+        tensor(0.3333)
+        >>> recall(preds, target, task="multiclass", average='micro', num_classes=3)
+        tensor(0.2500)
+    """
+    assert multidim_average is not None
+    if task == "binary":
+        return binary_recall(preds, target, threshold, multidim_average, ignore_index, validate_args)
+    if task == "multiclass":
+        assert isinstance(num_classes, int)
+        assert isinstance(top_k, int)
+        return multiclass_recall(
+            preds, target, num_classes, average, top_k, multidim_average, ignore_index, validate_args
+        )
+    if task == "multilabel":
+        assert isinstance(num_labels, int)
+        return multilabel_recall(
+            preds, target, num_labels, threshold, average, multidim_average, ignore_index, validate_args
+        )
+    raise ValueError(
+        f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
+    )
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/precision_recall_curve.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/precision_recall_curve.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b08e366a6665f62b3d424486b1084e6ccbc8d31
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/precision_recall_curve.py
@@ -0,0 +1,834 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List, Optional, Sequence, Tuple, Union
+
+import torch
+from torch import Tensor, tensor
+from torch.nn import functional as F
+from typing_extensions import Literal
+
+from torchmetrics.utilities.checks import _check_same_shape
+from torchmetrics.utilities.compute import _safe_divide
+from torchmetrics.utilities.data import _bincount
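+
+# Hedged overview (editorial, not part of the original module): the helpers below
+# support two evaluation modes. With `thresholds=None` the exact curve is built
+# from every unique prediction score, costing O(n_samples) memory; with an explicit
+# `thresholds` argument, a (n_thresholds, 2, 2) stack of confusion matrices is
+# accumulated instead, costing O(n_thresholds) memory at a small loss in exactness.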
+
+
+def _binary_clf_curve(
+    preds: Tensor,
+    target: Tensor,
+    sample_weights: Optional[Sequence] = None,
+    pos_label: int = 1,
+) -> Tuple[Tensor, Tensor, Tensor]:
+    """Calculates the true positives (tps) and false positives (fps) for all unique thresholds in the preds tensor.
+    Adapted from https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/metrics/_ranking.py.
+
+    Args:
+        preds: 1d tensor with predictions
+        target: 1d tensor with true values
+        sample_weights: a 1d tensor with a weight per sample
+        pos_label: integer determining what the positive class in target tensor is
+
+    Returns:
+        fps: 1d tensor with false positives for different thresholds
+        tps: 1d tensor with true positives for different thresholds
+        thresholds: the unique thresholds used for calculating fps and tps
+    """
+    with torch.no_grad():
+        if sample_weights is not None and not isinstance(sample_weights, Tensor):
+            sample_weights = tensor(sample_weights, device=preds.device, dtype=torch.float)
+
+        # remove class dimension if necessary
+        if preds.ndim > target.ndim:
+            preds = preds[:, 0]
+        desc_score_indices = torch.argsort(preds, descending=True)
+
+        preds = preds[desc_score_indices]
+        target = target[desc_score_indices]
+
+        if sample_weights is not None:
+            weight = sample_weights[desc_score_indices]
+        else:
+            weight = 1.0
+
+        # pred typically has many tied values. Here we extract
+        # the indices associated with the distinct values. We also
+        # concatenate a value for the end of the curve.
+        distinct_value_indices = torch.where(preds[1:] - preds[:-1])[0]
+        threshold_idxs = F.pad(distinct_value_indices, [0, 1], value=target.size(0) - 1)
+        target = (target == pos_label).to(torch.long)
+        tps = torch.cumsum(target * weight, dim=0)[threshold_idxs]
+
+        if sample_weights is not None:
+            # express fps as a cumsum to ensure fps is increasing even in
+            # the presence of floating point errors
+            fps = torch.cumsum((1 - target) * weight, dim=0)[threshold_idxs]
+        else:
+            # with unit weights the false positive count follows directly from the
+            # cumulative sample count, since fps + tps = threshold_idxs + 1
+            fps = 1 + threshold_idxs - tps
+
+        return fps, tps, preds[threshold_idxs]
+
+
+def _adjust_threshold_arg(
+    thresholds: Optional[Union[int, List[float], Tensor]] = None, device: Optional[torch.device] = None
+) -> Optional[Tensor]:
+    """Utility function for converting the threshold arg for list and int to tensor format."""
+    if isinstance(thresholds, int):
+        thresholds = torch.linspace(0, 1, thresholds, device=device)
+    if isinstance(thresholds, list):
+        thresholds = torch.tensor(thresholds, device=device)
+    return thresholds
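+
+
+# Hedged usage sketch for `_adjust_threshold_arg` (editorial, values assumed):
+#
+#     _adjust_threshold_arg(5)           # -> tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])
+#     _adjust_threshold_arg([0.1, 0.9])  # -> tensor([0.1000, 0.9000])
+#     _adjust_threshold_arg(None)        # -> None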
+
+
+def _binary_precision_recall_curve_arg_validation(
+    thresholds: Optional[Union[int, List[float], Tensor]] = None,
+    ignore_index: Optional[int] = None,
+) -> None:
+    """Validate non tensor input.
+
+    - ``thresholds`` has to be None | a 1d tensor | a list of floats in the [0,1] range | an int
+    - ``ignore_index`` has to be None or int
+    """
+    if thresholds is not None and not isinstance(thresholds, (list, int, Tensor)):
+        raise ValueError(
+            "Expected argument `thresholds` to either be an integer, list of floats or"
+            f" tensor of floats, but got {thresholds}"
+        )
+    if isinstance(thresholds, int) and thresholds < 2:
+        raise ValueError(
+            f"If argument `thresholds` is an integer, expected it to be larger than 1, but got {thresholds}"
+        )
+    if isinstance(thresholds, list) and not all(isinstance(t, float) and 0 <= t <= 1 for t in thresholds):
+        raise ValueError(
+            "If argument `thresholds` is a list, expected all elements to be floats in the [0,1] range,"
+            f" but got {thresholds}"
+        )
+    if isinstance(thresholds, Tensor) and not thresholds.ndim == 1:
+        raise ValueError("If argument `thresholds` is a tensor, expected the tensor to be 1d")
+
+    if ignore_index is not None and not isinstance(ignore_index, int):
+        raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
+
+
+def _binary_precision_recall_curve_tensor_validation(
+    preds: Tensor, target: Tensor, ignore_index: Optional[int] = None
+) -> None:
+    """Validate tensor input.
+
+    - tensors have to be of same shape
+    - all values in target tensor that are not ignored have to be in {0, 1}
+    - the pred tensor has to be floating point
+    """
+    _check_same_shape(preds, target)
+
+    if target.is_floating_point():
+        raise ValueError(
+            "Expected argument `target` to be an int or long tensor with ground truth labels"
+            f" but got tensor with dtype {target.dtype}"
+        )
+
+    if not preds.is_floating_point():
+        raise ValueError(
+            "Expected argument `preds` to be a floating point tensor with probability/logit scores,"
+            f" but got tensor with dtype {preds.dtype}"
+        )
+
+    # Check that target only contains {0,1} values or value in ignore_index
+    unique_values = torch.unique(target)
+    if ignore_index is None:
+        check = torch.any((unique_values != 0) & (unique_values != 1))
+    else:
+        check = torch.any((unique_values != 0) & (unique_values != 1) & (unique_values != ignore_index))
+    if check:
+        raise RuntimeError(
+            f"Detected the following values in `target`: {unique_values} but expected only"
+            f" the following values {[0, 1] + ([] if ignore_index is None else [ignore_index])}."
+        )
+
+
+def _binary_precision_recall_curve_format(
+    preds: Tensor,
+    target: Tensor,
+    thresholds: Optional[Union[int, List[float], Tensor]] = None,
+    ignore_index: Optional[int] = None,
+) -> Tuple[Tensor, Tensor, Optional[Tensor]]:
+    """Convert all input to the right format.
+
+    - flattens additional dimensions
+    - removes all datapoints that should be ignored
+    - applies sigmoid if pred tensor is not in the [0,1] range
+    - formats the thresholds arg to be a tensor
+    """
+    preds = preds.flatten()
+    target = target.flatten()
+    if ignore_index is not None:
+        idx = target != ignore_index
+        preds = preds[idx]
+        target = target[idx]
+
+    if not torch.all((0 <= preds) * (preds <= 1)):
+        preds = preds.sigmoid()
+
+    thresholds = _adjust_threshold_arg(thresholds, preds.device)
+    return preds, target, thresholds
+
+
+def _binary_precision_recall_curve_update(
+    preds: Tensor,
+    target: Tensor,
+    thresholds: Optional[Tensor],
+) -> Union[Tensor, Tuple[Tensor, Tensor]]:
+    """Returns the state to calculate the pr-curve with.
+
+    If thresholds is `None` the direct preds and targets are used. If thresholds is not `None` we compute a
+    multi-threshold confusion matrix.
+    """
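+    # Hedged worked example (editorial, values assumed): with preds=[0.3, 0.8],
+    # target=[0, 1] and thresholds=[0.0, 0.5], the second sample at threshold
+    # index k=1 has pred_bit=1 and target=1, so it is encoded as
+    # 1 + 2 * 1 + 4 * 1 = 7. After `_bincount` the flat counts are reshaped to
+    # (len_t, 2, 2), so index 7 lands in the true-positive cell state[1, 1, 1].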
+    if thresholds is None:
+        return preds, target
+    len_t = len(thresholds)
+    preds_t = (preds.unsqueeze(-1) >= thresholds.unsqueeze(0)).long()  # num_samples x num_thresholds
+    unique_mapping = preds_t + 2 * target.unsqueeze(-1) + 4 * torch.arange(len_t, device=target.device)
+    bins = _bincount(unique_mapping.flatten(), minlength=4 * len_t)
+    return bins.reshape(len_t, 2, 2)
+
+
+def _binary_precision_recall_curve_compute(
+    state: Union[Tensor, Tuple[Tensor, Tensor]],
+    thresholds: Optional[Tensor],
+    pos_label: int = 1,
+) -> Tuple[Tensor, Tensor, Tensor]:
+    """Computes the final pr-curve.
+
+    If state is a single tensor, then we calculate the pr-curve from a multi-threshold confusion matrix. If state is
+    the original input, then we dynamically compute the binary classification curve.
+    """
+    if isinstance(state, Tensor):
+        tps = state[:, 1, 1]
+        fps = state[:, 0, 1]
+        fns = state[:, 1, 0]
+        precision = _safe_divide(tps, tps + fps)
+        recall = _safe_divide(tps, tps + fns)
+        precision = torch.cat([precision, torch.ones(1, dtype=precision.dtype, device=precision.device)])
+        recall = torch.cat([recall, torch.zeros(1, dtype=recall.dtype, device=recall.device)])
+        return precision, recall, thresholds
+    else:
+        fps, tps, thresholds = _binary_clf_curve(state[0], state[1], pos_label=pos_label)
+        precision = tps / (tps + fps)
+        recall = tps / tps[-1]
+
+        # stop when full recall attained and reverse the outputs so recall is decreasing
+        last_ind = torch.where(tps == tps[-1])[0][0]
+        sl = slice(0, last_ind.item() + 1)
+
+        # need to call reversed explicitly, since including that to slice would
+        # introduce negative strides that are not yet supported in pytorch
+        precision = torch.cat([reversed(precision[sl]), torch.ones(1, dtype=precision.dtype, device=precision.device)])
+        recall = torch.cat([reversed(recall[sl]), torch.zeros(1, dtype=recall.dtype, device=recall.device)])
+        thresholds = reversed(thresholds[sl]).detach().clone()  # type: ignore
+
+        return precision, recall, thresholds
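+
+
+# Hedged worked example for the binned branch above (editorial, values assumed):
+# if at some threshold index k the confusion matrix is
+# state[k] = [[TN, FP], [FN, TP]] = [[1, 0], [0, 1]], then
+# precision[k] = TP / (TP + FP) = 1.0 and recall[k] = TP / (TP + FN) = 1.0;
+# a final (precision=1, recall=0) point is appended so every curve ends at
+# zero recall.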
+
+
+def binary_precision_recall_curve(
+    preds: Tensor,
+    target: Tensor,
+    thresholds: Optional[Union[int, List[float], Tensor]] = None,
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tuple[Tensor, Tensor, Tensor]:
+    r"""Computes the precision-recall curve for binary tasks. The curve consists of multiple pairs of precision and
+    recall values evaluated at different thresholds, such that the tradeoff between the two values can be seen.
+
+    Accepts the following input tensors:
+
+    - ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
+      observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
+      sigmoid per element.
+    - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
+      only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class.
+
+    Additional dimension ``...`` will be flattened into the batch dimension.
+
+    The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
+    that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
+    non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
+    argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
+    size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
+
+    Args:
+        preds: Tensor with predictions
+        target: Tensor with true labels
+        thresholds:
+            Can be one of:
+
+            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
+              all the data. Most accurate but also most memory consuming approach.
+            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
+              0 to 1 as bins for the calculation.
+            - If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
+            - If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
+              bins for the calculation.
+
+        ignore_index:
+            Specifies a target value that is ignored and does not contribute to the metric calculation
+        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+            Set to ``False`` for faster computations.
+
+    Returns:
+        (tuple): a tuple of 3 tensors containing:
+
+        - precision: a 1d tensor of size (n_thresholds+1, ) with precision values
+        - recall: a 1d tensor of size (n_thresholds+1, ) with recall values
+        - thresholds: a 1d tensor of size (n_thresholds, ) with increasing threshold values
+
+    Example:
+        >>> from torchmetrics.functional.classification import binary_precision_recall_curve
+        >>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
+        >>> target = torch.tensor([0, 1, 1, 0])
+        >>> binary_precision_recall_curve(preds, target, thresholds=None)  # doctest: +NORMALIZE_WHITESPACE
+        (tensor([0.6667, 0.5000, 0.0000, 1.0000]),
+         tensor([1.0000, 0.5000, 0.0000, 0.0000]),
+         tensor([0.5000, 0.7000, 0.8000]))
+        >>> binary_precision_recall_curve(preds, target, thresholds=5)  # doctest: +NORMALIZE_WHITESPACE
+        (tensor([0.5000, 0.6667, 0.6667, 0.0000, 0.0000, 1.0000]),
+         tensor([1., 1., 1., 0., 0., 0.]),
+         tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
+    """
+    if validate_args:
+        _binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
+        _binary_precision_recall_curve_tensor_validation(preds, target, ignore_index)
+    preds, target, thresholds = _binary_precision_recall_curve_format(preds, target, thresholds, ignore_index)
+    state = _binary_precision_recall_curve_update(preds, target, thresholds)
+    return _binary_precision_recall_curve_compute(state, thresholds)
+
+
+def _multiclass_precision_recall_curve_arg_validation(
+    num_classes: int,
+    thresholds: Optional[Union[int, List[float], Tensor]] = None,
+    ignore_index: Optional[int] = None,
+) -> None:
+    """Validate non tensor input.
+
+    - ``num_classes`` has to be an int larger than 1
+    - ``thresholds`` has to be None | a 1d tensor | a list of floats in the [0,1] range | an int
+    - ``ignore_index`` has to be None or int
+    """
+    if not isinstance(num_classes, int) or num_classes < 2:
+        raise ValueError(f"Expected argument `num_classes` to be an integer larger than 1, but got {num_classes}")
+    _binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
+
+
+def _multiclass_precision_recall_curve_tensor_validation(
+    preds: Tensor, target: Tensor, num_classes: int, ignore_index: Optional[int] = None
+) -> None:
+    """Validate tensor input.
+
+    - preds should have one more dimension than target and all dimensions except for preds.shape[1] should match
+      exactly.
preds.shape[1] should have size equal to number of classes + - all values in target tensor that are not ignored have to be in {0, 1} + """ + if not preds.ndim == target.ndim + 1: + raise ValueError( + f"Expected `preds` to have one more dimension than `target` but got {preds.ndim} and {target.ndim}" + ) + if target.is_floating_point(): + raise ValueError( + f"Expected argument `target` to be an int or long tensor, but got tensor with dtype {target.dtype}" + ) + if not preds.is_floating_point(): + raise ValueError(f"Expected `preds` to be a float tensor, but got {preds.dtype}") + if preds.shape[1] != num_classes: + raise ValueError( + "Expected `preds.shape[1]` to be equal to the number of classes but" + f" got {preds.shape[1]} and {num_classes}." + ) + if preds.shape[0] != target.shape[0] or preds.shape[2:] != target.shape[1:]: + raise ValueError( + "Expected the shape of `preds` should be (N, C, ...) and the shape of `target` should be (N, ...)" + f" but got {preds.shape} and {target.shape}" + ) + + num_unique_values = len(torch.unique(target)) + if ignore_index is None: + check = num_unique_values > num_classes + else: + check = num_unique_values > num_classes + 1 + if check: + raise RuntimeError( + "Detected more unique values in `target` than `num_classes`. Expected only " + f"{num_classes if ignore_index is None else num_classes + 1} but found " + f"{num_unique_values} in `target`." + ) + + +def _multiclass_precision_recall_curve_format( + preds: Tensor, + target: Tensor, + num_classes: int, + thresholds: Optional[Union[int, List[float], Tensor]] = None, + ignore_index: Optional[int] = None, +) -> Tuple[Tensor, Tensor, Optional[Tensor]]: + """Convert all input to the right format. + + - flattens additional dimensions + - Remove all datapoints that should be ignored + - Applies softmax if pred tensor not in [0,1] range + - Format thresholds arg to be a tensor + """ + preds = preds.transpose(0, 1).reshape(num_classes, -1).T + target = target.flatten() + + if ignore_index is not None: + idx = target != ignore_index + preds = preds[idx] + target = target[idx] + + if not torch.all((0 <= preds) * (preds <= 1)): + preds = preds.softmax(1) + + thresholds = _adjust_threshold_arg(thresholds, preds.device) + return preds, target, thresholds + + +def _multiclass_precision_recall_curve_update( + preds: Tensor, + target: Tensor, + num_classes: int, + thresholds: Optional[Tensor], +) -> Union[Tensor, Tuple[Tensor, Tensor]]: + """Returns the state to calculate the pr-curve with. + + If thresholds is `None` the direct preds and targets are used. If thresholds is not `None` we compute a multi + threshold confusion matrix. 
+ """ + if thresholds is None: + return preds, target + len_t = len(thresholds) + # num_samples x num_classes x num_thresholds + preds_t = (preds.unsqueeze(-1) >= thresholds.unsqueeze(0).unsqueeze(0)).long() + target_t = torch.nn.functional.one_hot(target, num_classes=num_classes) + unique_mapping = preds_t + 2 * target_t.unsqueeze(-1) + unique_mapping += 4 * torch.arange(num_classes, device=preds.device).unsqueeze(0).unsqueeze(-1) + unique_mapping += 4 * num_classes * torch.arange(len_t, device=preds.device) + bins = _bincount(unique_mapping.flatten(), minlength=4 * num_classes * len_t) + return bins.reshape(len_t, num_classes, 2, 2) + + +def _multiclass_precision_recall_curve_compute( + state: Union[Tensor, Tuple[Tensor, Tensor]], + num_classes: int, + thresholds: Optional[Tensor], +) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: + """Computes the final pr-curve. + + If state is a single tensor, then we calculate the pr-curve from a multi threshold confusion matrix. If state is + original input, then we dynamically compute the binary classification curve in an iterative way. + """ + if isinstance(state, Tensor): + tps = state[:, :, 1, 1] + fps = state[:, :, 0, 1] + fns = state[:, :, 1, 0] + precision = _safe_divide(tps, tps + fps) + recall = _safe_divide(tps, tps + fns) + precision = torch.cat([precision, torch.ones(1, num_classes, dtype=precision.dtype, device=precision.device)]) + recall = torch.cat([recall, torch.zeros(1, num_classes, dtype=recall.dtype, device=recall.device)]) + return precision.T, recall.T, thresholds + else: + precision, recall, thresholds = [], [], [] + for i in range(num_classes): + res = _binary_precision_recall_curve_compute([state[0][:, i], state[1]], thresholds=None, pos_label=i) + precision.append(res[0]) + recall.append(res[1]) + thresholds.append(res[2]) + return precision, recall, thresholds + + +def multiclass_precision_recall_curve( + preds: Tensor, + target: Tensor, + num_classes: int, + thresholds: Optional[Union[int, List[float], Tensor]] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: + r"""Computes the precision-recall curve for multiclass tasks. The curve consist of multiple pairs of precision + and recall values evaluated at different thresholds, such that the tradeoff between the two values can been + seen. + + Accepts the following input tensors: + + - ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each + observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply + softmax per sample. + - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore + only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified). + + Additional dimension ``...`` will be flattened into the batch dimension. + + The implementation both supports calculating the metric in a non-binned but accurate version and a binned version + that is less accurate but more memory efficient. 
Setting the `thresholds` argument to `None` will activate the + non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds` + argument to either an integer, list or a 1d tensor will use a binned version that uses memory of + size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory). + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_classes: Integer specifing the number of classes + thresholds: + Can be one of: + + - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from + all the data. Most accurate but also most memory consuming approach. + - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from + 0 to 1 as bins for the calculation. + - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation + - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as + bins for the calculation. + + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + (tuple): a tuple of either 3 tensors or 3 lists containing + + - precision: if `thresholds=None` a list for each class is returned with an 1d tensor of size (n_thresholds+1, ) + with precision values (length may differ between classes). If `thresholds` is set to something else, + then a single 2d tensor of size (n_classes, n_thresholds+1) with precision values is returned. + - recall: if `thresholds=None` a list for each class is returned with an 1d tensor of size (n_thresholds+1, ) + with recall values (length may differ between classes). If `thresholds` is set to something else, + then a single 2d tensor of size (n_classes, n_thresholds+1) with recall values is returned. + - thresholds: if `thresholds=None` a list for each class is returned with an 1d tensor of size (n_thresholds, ) + with increasing threshold values (length may differ between classes). If `threshold` is set to something else, + then a single 1d tensor of size (n_thresholds, ) is returned with shared threshold values for all classes. + + Example: + >>> from torchmetrics.functional.classification import multiclass_precision_recall_curve + >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.05, 0.75, 0.05]]) + >>> target = torch.tensor([0, 1, 3, 2]) + >>> precision, recall, thresholds = multiclass_precision_recall_curve( + ... preds, target, num_classes=5, thresholds=None + ... ) + >>> precision # doctest: +NORMALIZE_WHITESPACE + [tensor([1., 1.]), tensor([1., 1.]), tensor([0.2500, 0.0000, 1.0000]), + tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])] + >>> recall + [tensor([1., 0.]), tensor([1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])] + >>> thresholds + [tensor([0.7500]), tensor([0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500])] + >>> multiclass_precision_recall_curve( + ... preds, target, num_classes=5, thresholds=5 + ... 
) # doctest: +NORMALIZE_WHITESPACE + (tensor([[0.2500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000], + [0.2500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000], + [0.2500, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000], + [0.2500, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000], + [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000]]), + tensor([[1., 1., 1., 1., 0., 0.], + [1., 1., 1., 1., 0., 0.], + [1., 0., 0., 0., 0., 0.], + [1., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0.]]), + tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])) + """ + if validate_args: + _multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index) + _multiclass_precision_recall_curve_tensor_validation(preds, target, num_classes, ignore_index) + preds, target, thresholds = _multiclass_precision_recall_curve_format( + preds, target, num_classes, thresholds, ignore_index + ) + state = _multiclass_precision_recall_curve_update(preds, target, num_classes, thresholds) + return _multiclass_precision_recall_curve_compute(state, num_classes, thresholds) + + +def _multilabel_precision_recall_curve_arg_validation( + num_labels: int, + thresholds: Optional[Union[int, List[float], Tensor]] = None, + ignore_index: Optional[int] = None, +) -> None: + """Validate non tensor input. + + - ``num_labels`` has to be an int larger than 1 + - ``threshold`` has to be None | a 1d tensor | a list of floats in the [0,1] range | an int + - ``ignore_index`` has to be None or int + """ + _multiclass_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index) + + +def _multilabel_precision_recall_curve_tensor_validation( + preds: Tensor, target: Tensor, num_labels: int, ignore_index: Optional[int] = None +) -> None: + """Validate tensor input. + + - tensors have to be of same shape + - preds.shape[1] is equal to the number of labels + - all values in target tensor that are not ignored have to be in {0, 1} + - that the pred tensor is floating point + """ + _binary_precision_recall_curve_tensor_validation(preds, target, ignore_index) + if preds.shape[1] != num_labels: + raise ValueError( + "Expected both `target.shape[1]` and `preds.shape[1]` to be equal to the number of labels" + f" but got {preds.shape[1]} and expected {num_labels}" + ) + + +def _multilabel_precision_recall_curve_format( + preds: Tensor, + target: Tensor, + num_labels: int, + thresholds: Optional[Union[int, List[float], Tensor]] = None, + ignore_index: Optional[int] = None, +) -> Tuple[Tensor, Tensor, Optional[Tensor]]: + """Convert all input to the right format. 
+ + - flattens additional dimensions + - Mask all datapoints that should be ignored with negative values + - Applies sigmoid if pred tensor not in [0,1] range + - Format thresholds arg to be a tensor + """ + preds = preds.transpose(0, 1).reshape(num_labels, -1).T + target = target.transpose(0, 1).reshape(num_labels, -1).T + if not torch.all((0 <= preds) * (preds <= 1)): + preds = preds.sigmoid() + + thresholds = _adjust_threshold_arg(thresholds, preds.device) + if ignore_index is not None and thresholds is not None: + preds = preds.clone() + target = target.clone() + # Make sure that when we map, it will always result in a negative number that we can filter away + idx = target == ignore_index + preds[idx] = -4 * num_labels * (len(thresholds) if thresholds is not None else 1) + target[idx] = -4 * num_labels * (len(thresholds) if thresholds is not None else 1) + + return preds, target, thresholds + + +def _multilabel_precision_recall_curve_update( + preds: Tensor, + target: Tensor, + num_labels: int, + thresholds: Optional[Tensor], +) -> Union[Tensor, Tuple[Tensor, Tensor]]: + """Returns the state to calculate the pr-curve with. + + If thresholds is `None` the direct preds and targets are used. If thresholds is not `None` we compute a multi + threshold confusion matrix. + """ + if thresholds is None: + return preds, target + len_t = len(thresholds) + # num_samples x num_labels x num_thresholds + preds_t = (preds.unsqueeze(-1) >= thresholds.unsqueeze(0).unsqueeze(0)).long() + unique_mapping = preds_t + 2 * target.unsqueeze(-1) + unique_mapping += 4 * torch.arange(num_labels, device=preds.device).unsqueeze(0).unsqueeze(-1) + unique_mapping += 4 * num_labels * torch.arange(len_t, device=preds.device) + unique_mapping = unique_mapping[unique_mapping >= 0] + bins = _bincount(unique_mapping, minlength=4 * num_labels * len_t) + return bins.reshape(len_t, num_labels, 2, 2) + + +def _multilabel_precision_recall_curve_compute( + state: Union[Tensor, Tuple[Tensor, Tensor]], + num_labels: int, + thresholds: Optional[Tensor], + ignore_index: Optional[int] = None, +) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: + """Computes the final pr-curve. + + If state is a single tensor, then we calculate the pr-curve from a multi threshold confusion matrix. If state is + original input, then we dynamically compute the binary classification curve in an iterative way. 
+ """ + if isinstance(state, Tensor): + tps = state[:, :, 1, 1] + fps = state[:, :, 0, 1] + fns = state[:, :, 1, 0] + precision = _safe_divide(tps, tps + fps) + recall = _safe_divide(tps, tps + fns) + precision = torch.cat([precision, torch.ones(1, num_labels, dtype=precision.dtype, device=precision.device)]) + recall = torch.cat([recall, torch.zeros(1, num_labels, dtype=recall.dtype, device=recall.device)]) + return precision.T, recall.T, thresholds + else: + precision, recall, thresholds = [], [], [] + for i in range(num_labels): + preds = state[0][:, i] + target = state[1][:, i] + if ignore_index is not None: + idx = target == ignore_index + preds = preds[~idx] + target = target[~idx] + res = _binary_precision_recall_curve_compute([preds, target], thresholds=None, pos_label=1) + precision.append(res[0]) + recall.append(res[1]) + thresholds.append(res[2]) + return precision, recall, thresholds + + +def multilabel_precision_recall_curve( + preds: Tensor, + target: Tensor, + num_labels: int, + thresholds: Optional[Union[int, List[float], Tensor]] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: + r"""Computes the precision-recall curve for multilabel tasks. The curve consist of multiple pairs of precision + and recall values evaluated at different thresholds, such that the tradeoff between the two values can been + seen. + + Accepts the following input tensors: + + - ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each + observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply + sigmoid per element. + - ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore + only contain {0,1} values (except if `ignore_index` is specified). + + Additional dimension ``...`` will be flattened into the batch dimension. + + The implementation both supports calculating the metric in a non-binned but accurate version and a binned version + that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the + non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds` + argument to either an integer, list or a 1d tensor will use a binned version that uses memory of + size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory). + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_labels: Integer specifing the number of labels + thresholds: + Can be one of: + + - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from + all the data. Most accurate but also most memory consuming approach. + - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from + 0 to 1 as bins for the calculation. + - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation + - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as + bins for the calculation. + + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. 
+ + Returns: + (tuple): a tuple of either 3 tensors or 3 lists containing + + - precision: if `thresholds=None` a list for each label is returned with an 1d tensor of size (n_thresholds+1, ) + with precision values (length may differ between labels). If `thresholds` is set to something else, + then a single 2d tensor of size (n_labels, n_thresholds+1) with precision values is returned. + - recall: if `thresholds=None` a list for each label is returned with an 1d tensor of size (n_thresholds+1, ) + with recall values (length may differ between labels). If `thresholds` is set to something else, + then a single 2d tensor of size (n_labels, n_thresholds+1) with recall values is returned. + - thresholds: if `thresholds=None` a list for each label is returned with an 1d tensor of size (n_thresholds, ) + with increasing threshold values (length may differ between labels). If `threshold` is set to something else, + then a single 1d tensor of size (n_thresholds, ) is returned with shared threshold values for all labels. + + Example: + >>> from torchmetrics.functional.classification import multilabel_precision_recall_curve + >>> preds = torch.tensor([[0.75, 0.05, 0.35], + ... [0.45, 0.75, 0.05], + ... [0.05, 0.55, 0.75], + ... [0.05, 0.65, 0.05]]) + >>> target = torch.tensor([[1, 0, 1], + ... [0, 0, 0], + ... [0, 1, 1], + ... [1, 1, 1]]) + >>> precision, recall, thresholds = multilabel_precision_recall_curve( + ... preds, target, num_labels=3, thresholds=None + ... ) + >>> precision # doctest: +NORMALIZE_WHITESPACE + [tensor([0.5000, 0.5000, 1.0000, 1.0000]), tensor([0.6667, 0.5000, 0.0000, 1.0000]), + tensor([0.7500, 1.0000, 1.0000, 1.0000])] + >>> recall # doctest: +NORMALIZE_WHITESPACE + [tensor([1.0000, 0.5000, 0.5000, 0.0000]), tensor([1.0000, 0.5000, 0.0000, 0.0000]), + tensor([1.0000, 0.6667, 0.3333, 0.0000])] + >>> thresholds # doctest: +NORMALIZE_WHITESPACE + [tensor([0.0500, 0.4500, 0.7500]), tensor([0.5500, 0.6500, 0.7500]), + tensor([0.0500, 0.3500, 0.7500])] + >>> multilabel_precision_recall_curve( + ... preds, target, num_labels=3, thresholds=5 + ... ) # doctest: +NORMALIZE_WHITESPACE + (tensor([[0.5000, 0.5000, 1.0000, 1.0000, 0.0000, 1.0000], + [0.5000, 0.6667, 0.6667, 0.0000, 0.0000, 1.0000], + [0.7500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000]]), + tensor([[1.0000, 0.5000, 0.5000, 0.5000, 0.0000, 0.0000], + [1.0000, 1.0000, 1.0000, 0.0000, 0.0000, 0.0000], + [1.0000, 0.6667, 0.3333, 0.3333, 0.0000, 0.0000]]), + tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])) + """ + if validate_args: + _multilabel_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index) + _multilabel_precision_recall_curve_tensor_validation(preds, target, num_labels, ignore_index) + preds, target, thresholds = _multilabel_precision_recall_curve_format( + preds, target, num_labels, thresholds, ignore_index + ) + state = _multilabel_precision_recall_curve_update(preds, target, num_labels, thresholds) + return _multilabel_precision_recall_curve_compute(state, num_labels, thresholds, ignore_index) + + +def precision_recall_curve( + preds: Tensor, + target: Tensor, + task: Literal["binary", "multiclass", "multilabel"], + thresholds: Optional[Union[int, List[float], Tensor]] = None, + num_classes: Optional[int] = None, + num_labels: Optional[int] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: + r"""Computes the precision-recall curve. 
The curve consists of multiple pairs of precision and recall values
+    evaluated at different thresholds, such that the tradeoff between the two values can be seen.
+
+    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
+    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
+    :func:`binary_precision_recall_curve`, :func:`multiclass_precision_recall_curve` and
+    :func:`multilabel_precision_recall_curve` for the specific details of how each argument influences the metric,
+    along with usage examples.
+
+    Legacy Example:
+        >>> pred = torch.tensor([0.0, 1.0, 2.0, 3.0])
+        >>> target = torch.tensor([0, 1, 1, 0])
+        >>> precision, recall, thresholds = precision_recall_curve(pred, target, task='binary')
+        >>> precision
+        tensor([0.6667, 0.5000, 0.0000, 1.0000])
+        >>> recall
+        tensor([1.0000, 0.5000, 0.0000, 0.0000])
+        >>> thresholds
+        tensor([0.7311, 0.8808, 0.9526])
+
+        >>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+        ...                      [0.05, 0.75, 0.05, 0.05, 0.05],
+        ...                      [0.05, 0.05, 0.75, 0.05, 0.05],
+        ...                      [0.05, 0.05, 0.05, 0.75, 0.05]])
+        >>> target = torch.tensor([0, 1, 3, 2])
+        >>> precision, recall, thresholds = precision_recall_curve(pred, target, task='multiclass', num_classes=5)
+        >>> precision
+        [tensor([1., 1.]), tensor([1., 1.]), tensor([0.2500, 0.0000, 1.0000]),
+         tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
+        >>> recall
+        [tensor([1., 0.]), tensor([1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
+        >>> thresholds
+        [tensor([0.7500]), tensor([0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500])]
+    """
+    if task == "binary":
+        return binary_precision_recall_curve(preds, target, thresholds, ignore_index, validate_args)
+    if task == "multiclass":
+        assert isinstance(num_classes, int)
+        return multiclass_precision_recall_curve(preds, target, num_classes, thresholds, ignore_index, validate_args)
+    if task == "multilabel":
+        assert isinstance(num_labels, int)
+        return multilabel_precision_recall_curve(preds, target, num_labels, thresholds, ignore_index, validate_args)
+    raise ValueError(
+        f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
+    )
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/recall_at_fixed_precision.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/recall_at_fixed_precision.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c0e9f38578ca9d85d32576da6f03b98d5a45b95
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/recall_at_fixed_precision.py
@@ -0,0 +1,401 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
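+
+# Hedged overview (editorial, not part of the original module): every public
+# function below first builds a precision-recall curve and then scans it for the
+# highest recall whose precision still satisfies `min_precision`. For example,
+# given (precision, recall) pairs (1.0, 0.3), (0.6, 0.8) and (0.4, 1.0) with
+# min_precision=0.5, the selected recall is 0.8.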
+from typing import List, Optional, Tuple, Union
+
+import torch
+from torch import Tensor
+from typing_extensions import Literal
+
+from torchmetrics.functional.classification.precision_recall_curve import (
+    _binary_precision_recall_curve_arg_validation,
+    _binary_precision_recall_curve_compute,
+    _binary_precision_recall_curve_format,
+    _binary_precision_recall_curve_tensor_validation,
+    _binary_precision_recall_curve_update,
+    _multiclass_precision_recall_curve_arg_validation,
+    _multiclass_precision_recall_curve_compute,
+    _multiclass_precision_recall_curve_format,
+    _multiclass_precision_recall_curve_tensor_validation,
+    _multiclass_precision_recall_curve_update,
+    _multilabel_precision_recall_curve_arg_validation,
+    _multilabel_precision_recall_curve_compute,
+    _multilabel_precision_recall_curve_format,
+    _multilabel_precision_recall_curve_tensor_validation,
+    _multilabel_precision_recall_curve_update,
+)
+
+
+def _recall_at_precision(
+    precision: Tensor,
+    recall: Tensor,
+    thresholds: Tensor,
+    min_precision: float,
+) -> Tuple[Tensor, Tensor]:
+    try:
+        # pick the operating point with the highest recall among all points that
+        # satisfy the precision constraint; `max` raises ValueError if no point does
+        max_recall, _, best_threshold = max(
+            (r, p, t) for p, r, t in zip(precision, recall, thresholds) if p >= min_precision
+        )
+    except ValueError:
+        max_recall = torch.tensor(0.0, device=recall.device, dtype=recall.dtype)
+        best_threshold = torch.tensor(0)
+
+    if max_recall == 0.0:
+        # sentinel threshold returned when no threshold reaches `min_precision`
+        best_threshold = torch.tensor(1e6, device=thresholds.device, dtype=thresholds.dtype)
+
+    return max_recall, best_threshold
+
+
+def _binary_recall_at_fixed_precision_arg_validation(
+    min_precision: float,
+    thresholds: Optional[Union[int, List[float], Tensor]] = None,
+    ignore_index: Optional[int] = None,
+) -> None:
+    _binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
+    if not isinstance(min_precision, float) or not (0 <= min_precision <= 1):
+        raise ValueError(
+            f"Expected argument `min_precision` to be a float in the [0,1] range, but got {min_precision}"
+        )
+
+
+def _binary_recall_at_fixed_precision_compute(
+    state: Union[Tensor, Tuple[Tensor, Tensor]],
+    thresholds: Optional[Tensor],
+    min_precision: float,
+    pos_label: int = 1,
+) -> Tuple[Tensor, Tensor]:
+    precision, recall, thresholds = _binary_precision_recall_curve_compute(state, thresholds, pos_label)
+    return _recall_at_precision(precision, recall, thresholds, min_precision)
+
+
+def binary_recall_at_fixed_precision(
+    preds: Tensor,
+    target: Tensor,
+    min_precision: float,
+    thresholds: Optional[Union[int, List[float], Tensor]] = None,
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tuple[Tensor, Tensor]:
+    r"""Computes the highest possible recall value given a minimum precision threshold for binary tasks.
+    This is done by first calculating the precision-recall curve for different thresholds and then finding the
+    recall for a given precision level.
+
+    Accepts the following input tensors:
+
+    - ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
+      observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
+      sigmoid per element.
+    - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
+      only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class.
+
+    Additional dimension ``...`` will be flattened into the batch dimension.
+
+    The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
+    that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
+    non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
+    argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
+    size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
+
+    Args:
+        preds: Tensor with predictions
+        target: Tensor with true labels
+        min_precision: float value specifying minimum precision threshold.
+        thresholds:
+            Can be one of:
+
+            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
+              all the data. Most accurate but also most memory consuming approach.
+            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
+              0 to 1 as bins for the calculation.
+            - If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
+            - If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
+              bins for the calculation.
+
+        ignore_index:
+            Specifies a target value that is ignored and does not contribute to the metric calculation
+        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+            Set to ``False`` for faster computations.
+
+    Returns:
+        (tuple): a tuple of 2 tensors containing:
+
+        - recall: a scalar tensor with the maximum recall for the given precision level
+        - threshold: a scalar tensor with the corresponding threshold level
+
+    Example:
+        >>> from torchmetrics.functional.classification import binary_recall_at_fixed_precision
+        >>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
+        >>> target = torch.tensor([0, 1, 1, 0])
+        >>> binary_recall_at_fixed_precision(preds, target, min_precision=0.5, thresholds=None)
+        (tensor(1.), tensor(0.5000))
+        >>> binary_recall_at_fixed_precision(preds, target, min_precision=0.5, thresholds=5)
+        (tensor(1.), tensor(0.5000))
+    """
+    if validate_args:
+        _binary_recall_at_fixed_precision_arg_validation(min_precision, thresholds, ignore_index)
+        _binary_precision_recall_curve_tensor_validation(preds, target, ignore_index)
+    preds, target, thresholds = _binary_precision_recall_curve_format(preds, target, thresholds, ignore_index)
+    state = _binary_precision_recall_curve_update(preds, target, thresholds)
+    return _binary_recall_at_fixed_precision_compute(state, thresholds, min_precision)
+
+
+def _multiclass_recall_at_fixed_precision_arg_validation(
+    num_classes: int,
+    min_precision: float,
+    thresholds: Optional[Union[int, List[float], Tensor]] = None,
+    ignore_index: Optional[int] = None,
+) -> None:
+    _multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index)
+    if not isinstance(min_precision, float) or not (0 <= min_precision <= 1):
+        raise ValueError(
+            f"Expected argument `min_precision` to be a float in the [0,1] range, but got {min_precision}"
+        )
+
+
+def _multiclass_recall_at_fixed_precision_arg_compute(
+    state: Union[Tensor, Tuple[Tensor, Tensor]],
+    num_classes: int,
+    thresholds: Optional[Tensor],
+    min_precision: float,
+) -> Tuple[Tensor, Tensor]:
+    precision, recall, thresholds = _multiclass_precision_recall_curve_compute(state, num_classes, thresholds)
+    if isinstance(state, Tensor):
+        res = [_recall_at_precision(p, r, thresholds, min_precision) for p, r in zip(precision, recall)]
+    else:
+        res = [_recall_at_precision(p, r, t, min_precision) for p, r, t in zip(precision, recall, thresholds)]
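+    # Hedged editorial note: each entry of `res` is a (max_recall, best_threshold)
+    # pair; classes for which no threshold reaches `min_precision` carry recall 0
+    # and the sentinel threshold 1e6 from `_recall_at_precision`, which is why the
+    # examples below print 1.0000e+06 for such classes.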
+    recall = torch.stack([r[0] for r in res])
+    thresholds = torch.stack([r[1] for r in res])
+    return recall, thresholds
+
+
+def multiclass_recall_at_fixed_precision(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: int,
+    min_precision: float,
+    thresholds: Optional[Union[int, List[float], Tensor]] = None,
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tuple[Tensor, Tensor]:
+    r"""Computes the highest possible recall value given a minimum precision threshold for multiclass
+    tasks. This is done by first calculating the precision-recall curve for different thresholds and then finding
+    the recall for a given precision level.
+
+    Accepts the following input tensors:
+
+    - ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
+      observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
+      softmax per sample.
+    - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
+      only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
+
+    Additional dimension ``...`` will be flattened into the batch dimension.
+
+    The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
+    that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
+    non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
+    argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
+    size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
+
+    Args:
+        preds: Tensor with predictions
+        target: Tensor with true labels
+        num_classes: Integer specifying the number of classes
+        min_precision: float value specifying minimum precision threshold.
+        thresholds:
+            Can be one of:
+
+            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
+              all the data. Most accurate but also most memory consuming approach.
+            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
+              0 to 1 as bins for the calculation.
+            - If set to a `list` of floats, will use the indicated thresholds in the list as bins for the calculation
+            - If set to a 1d `tensor` of floats, will use the indicated thresholds in the tensor as
+              bins for the calculation.
+
+        ignore_index:
+            Specifies a target value that is ignored and does not contribute to the metric calculation
+        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+            Set to ``False`` for faster computations.
+
+    Returns:
+        (tuple): a tuple of 2 tensors containing:
+
+        - recall: a 1d tensor of size (n_classes, ) with the maximum recall for the given precision level per class
+        - thresholds: a 1d tensor of size (n_classes, ) with the corresponding threshold level per class
+
+    Example:
+        >>> from torchmetrics.functional.classification import multiclass_recall_at_fixed_precision
+        >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
+        ...                       [0.05, 0.75, 0.05, 0.05, 0.05],
+        ...                       [0.05, 0.05, 0.75, 0.05, 0.05],
+        ...                       [0.05, 0.05, 0.05, 0.75, 0.05]])
+        >>> target = torch.tensor([0, 1, 3, 2])
+        >>> multiclass_recall_at_fixed_precision(preds, target, num_classes=5, min_precision=0.5, thresholds=None)
+        (tensor([1., 1., 0., 0., 0.]), tensor([7.5000e-01, 7.5000e-01, 1.0000e+06, 1.0000e+06, 1.0000e+06]))
+        >>> multiclass_recall_at_fixed_precision(preds, target, num_classes=5, min_precision=0.5, thresholds=5)
+        (tensor([1., 1., 0., 0., 0.]), tensor([7.5000e-01, 7.5000e-01, 1.0000e+06, 1.0000e+06, 1.0000e+06]))
+    """
+    if validate_args:
+        _multiclass_recall_at_fixed_precision_arg_validation(num_classes, min_precision, thresholds, ignore_index)
+        _multiclass_precision_recall_curve_tensor_validation(preds, target, num_classes, ignore_index)
+    preds, target, thresholds = _multiclass_precision_recall_curve_format(
+        preds, target, num_classes, thresholds, ignore_index
+    )
+    state = _multiclass_precision_recall_curve_update(preds, target, num_classes, thresholds)
+    return _multiclass_recall_at_fixed_precision_arg_compute(state, num_classes, thresholds, min_precision)
+
+
+def _multilabel_recall_at_fixed_precision_arg_validation(
+    num_labels: int,
+    min_precision: float,
+    thresholds: Optional[Union[int, List[float], Tensor]] = None,
+    ignore_index: Optional[int] = None,
+) -> None:
+    _multilabel_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index)
+    if not isinstance(min_precision, float) or not (0 <= min_precision <= 1):
+        raise ValueError(
+            f"Expected argument `min_precision` to be a float in the [0,1] range, but got {min_precision}"
+        )
+
+
+def _multilabel_recall_at_fixed_precision_arg_compute(
+    state: Union[Tensor, Tuple[Tensor, Tensor]],
+    num_labels: int,
+    thresholds: Optional[Tensor],
+    ignore_index: Optional[int],
+    min_precision: float,
+) -> Tuple[Tensor, Tensor]:
+    precision, recall, thresholds = _multilabel_precision_recall_curve_compute(
+        state, num_labels, thresholds, ignore_index
+    )
+    if isinstance(state, Tensor):
+        res = [_recall_at_precision(p, r, thresholds, min_precision) for p, r in zip(precision, recall)]
+    else:
+        res = [_recall_at_precision(p, r, t, min_precision) for p, r, t in zip(precision, recall, thresholds)]
+    recall = torch.stack([r[0] for r in res])
+    thresholds = torch.stack([r[1] for r in res])
+    return recall, thresholds
+
+
+def multilabel_recall_at_fixed_precision(
+    preds: Tensor,
+    target: Tensor,
+    num_labels: int,
+    min_precision: float,
+    thresholds: Optional[Union[int, List[float], Tensor]] = None,
+    ignore_index: Optional[int] = None,
+    validate_args: bool = True,
+) -> Tuple[Tensor, Tensor]:
+    r"""Computes the highest possible recall value given a minimum precision threshold for multilabel
+    tasks. This is done by first calculating the precision-recall curve for different thresholds and then finding
+    the recall for a given precision level.
+
+    Accepts the following input tensors:
+
+    - ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
+      observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
+      sigmoid per element.
+    - ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore
+      only contain {0,1} values (except if `ignore_index` is specified).
+
+    Additional dimension ``...`` will be flattened into the batch dimension.
+ + The implementation both supports calculating the metric in a non-binned but accurate version and a binned version + that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the + non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds` + argument to either an integer, list or a 1d tensor will use a binned version that uses memory of + size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory). + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_labels: Integer specifing the number of labels + min_precision: float value specifying minimum precision threshold. + thresholds: + Can be one of: + + - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from + all the data. Most accurate but also most memory consuming approach. + - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from + 0 to 1 as bins for the calculation. + - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation + - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as + bins for the calculation. + + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + (tuple): a tuple of either 2 tensors or 2 lists containing + + - recall: an 1d tensor of size (n_classes, ) with the maximum recall for the given precision level per class + - thresholds: an 1d tensor of size (n_classes, ) with the corresponding threshold level per class + + Example: + >>> from torchmetrics.functional.classification import multilabel_recall_at_fixed_precision + >>> preds = torch.tensor([[0.75, 0.05, 0.35], + ... [0.45, 0.75, 0.05], + ... [0.05, 0.55, 0.75], + ... [0.05, 0.65, 0.05]]) + >>> target = torch.tensor([[1, 0, 1], + ... [0, 0, 0], + ... [0, 1, 1], + ... [1, 1, 1]]) + >>> multilabel_recall_at_fixed_precision(preds, target, num_labels=3, min_precision=0.5, thresholds=None) + (tensor([1., 1., 1.]), tensor([0.0500, 0.5500, 0.0500])) + >>> multilabel_recall_at_fixed_precision(preds, target, num_labels=3, min_precision=0.5, thresholds=5) + (tensor([1., 1., 1.]), tensor([0.0000, 0.5000, 0.0000])) + """ + if validate_args: + _multilabel_recall_at_fixed_precision_arg_validation(num_labels, min_precision, thresholds, ignore_index) + _multilabel_precision_recall_curve_tensor_validation(preds, target, num_labels, ignore_index) + preds, target, thresholds = _multilabel_precision_recall_curve_format( + preds, target, num_labels, thresholds, ignore_index + ) + state = _multilabel_precision_recall_curve_update(preds, target, num_labels, thresholds) + return _multilabel_recall_at_fixed_precision_arg_compute(state, num_labels, thresholds, ignore_index, min_precision) + + +def recall_at_fixed_precision( + preds: Tensor, + target: Tensor, + task: Literal["binary", "multiclass", "multilabel"], + min_precision: float, + thresholds: Optional[Union[int, List[float], Tensor]] = None, + num_classes: Optional[int] = None, + num_labels: Optional[int] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Union[Tensor, Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: + r"""Computes the highest possible recall value given the minimum precision thresholds provided. 
This is done by
+    first calculating the precision-recall curve for different thresholds and then finding the recall for a given
+    precision level.
+
+    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
+    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
+    :func:`binary_recall_at_fixed_precision`, :func:`multiclass_recall_at_fixed_precision` and
+    :func:`multilabel_recall_at_fixed_precision` for the specific details of how each argument influences the result
+    and for examples.
+    """
+    if task == "binary":
+        return binary_recall_at_fixed_precision(preds, target, min_precision, thresholds, ignore_index, validate_args)
+    if task == "multiclass":
+        assert isinstance(num_classes, int)
+        return multiclass_recall_at_fixed_precision(
+            preds, target, num_classes, min_precision, thresholds, ignore_index, validate_args
+        )
+    if task == "multilabel":
+        assert isinstance(num_labels, int)
+        return multilabel_recall_at_fixed_precision(
+            preds, target, num_labels, min_precision, thresholds, ignore_index, validate_args
+        )
+    raise ValueError(
+        f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
+    )
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/roc.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/roc.py
new file mode 100644
index 0000000000000000000000000000000000000000..38e46a1755d6268b56407363bf9953df28ac21b7
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/roc.py
@@ -0,0 +1,496 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
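+
+# The helpers below support two evaluation modes, mirroring the precision-recall curve
+# helpers they build on: ``thresholds=None`` evaluates every unique score exactly, while
+# an int / list / tensor of thresholds bins the confusion-matrix statistics at fixed
+# cut-offs. A rough sketch of the binned binary update (illustrative only, simplified):
+#
+#     thresholds = torch.linspace(0, 1, n_bins)                # the fixed cut-offs
+#     preds_t = preds.unsqueeze(0) >= thresholds.unsqueeze(1)  # (n_bins, N) hard predictions
+#     tp = (preds_t & (target == 1)).sum(1)                    # per-bin confusion statistics
+#     fp = (preds_t & (target == 0)).sum(1)
+#     tpr = tp / (target == 1).sum()                           # equals tp / (tp + fn)
+#     fpr = fp / (target == 0).sum()                           # equals fp / (fp + tn)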
+from typing import List, Optional, Tuple, Union + +import torch +from torch import Tensor +from typing_extensions import Literal + +from torchmetrics.functional.classification.precision_recall_curve import ( + _binary_clf_curve, + _binary_precision_recall_curve_arg_validation, + _binary_precision_recall_curve_format, + _binary_precision_recall_curve_tensor_validation, + _binary_precision_recall_curve_update, + _multiclass_precision_recall_curve_arg_validation, + _multiclass_precision_recall_curve_format, + _multiclass_precision_recall_curve_tensor_validation, + _multiclass_precision_recall_curve_update, + _multilabel_precision_recall_curve_arg_validation, + _multilabel_precision_recall_curve_format, + _multilabel_precision_recall_curve_tensor_validation, + _multilabel_precision_recall_curve_update, +) +from torchmetrics.utilities import rank_zero_warn +from torchmetrics.utilities.compute import _safe_divide + + +def _binary_roc_compute( + state: Union[Tensor, Tuple[Tensor, Tensor]], + thresholds: Optional[Tensor], + pos_label: int = 1, +) -> Tuple[Tensor, Tensor, Tensor]: + if isinstance(state, Tensor) and thresholds is not None: + tps = state[:, 1, 1] + fps = state[:, 0, 1] + fns = state[:, 1, 0] + tns = state[:, 0, 0] + tpr = _safe_divide(tps, tps + fns).flip(0) + fpr = _safe_divide(fps, fps + tns).flip(0) + thresholds = thresholds.flip(0) + else: + fps, tps, thresholds = _binary_clf_curve(preds=state[0], target=state[1], pos_label=pos_label) + # Add an extra threshold position to make sure that the curve starts at (0, 0) + tps = torch.cat([torch.zeros(1, dtype=tps.dtype, device=tps.device), tps]) + fps = torch.cat([torch.zeros(1, dtype=fps.dtype, device=fps.device), fps]) + thresholds = torch.cat([torch.ones(1, dtype=thresholds.dtype, device=thresholds.device), thresholds]) + + if fps[-1] <= 0: + rank_zero_warn( + "No negative samples in targets, false positive value should be meaningless." + " Returning zero tensor in false positive score", + UserWarning, + ) + fpr = torch.zeros_like(thresholds) + else: + fpr = fps / fps[-1] + + if tps[-1] <= 0: + rank_zero_warn( + "No positive samples in targets, true positive value should be meaningless." + " Returning zero tensor in true positive score", + UserWarning, + ) + tpr = torch.zeros_like(thresholds) + else: + tpr = tps / tps[-1] + + return fpr, tpr, thresholds + + +def binary_roc( + preds: Tensor, + target: Tensor, + thresholds: Optional[Union[int, List[float], Tensor]] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tuple[Tensor, Tensor, Tensor]: + r"""Computes the Receiver Operating Characteristic (ROC) for binary tasks. The curve consist of multiple pairs + of true positive rate (TPR) and false positive rate (FPR) values evaluated at different thresholds, such that + the tradeoff between the two values can be seen. + + Accepts the following input tensors: + + - ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each + observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply + sigmoid per element. + - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore + only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class. + + Additional dimension ``...`` will be flattened into the batch dimension. 
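+
+    The automatic logit handling mentioned above amounts to roughly the following
+    preprocessing (illustrative only)::
+
+        if not ((preds >= 0) & (preds <= 1)).all():
+            preds = preds.sigmoid()  # values outside [0,1] are treated as logits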
+ + The implementation both supports calculating the metric in a non-binned but accurate version and a binned version + that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the + non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds` + argument to either an integer, list or a 1d tensor will use a binned version that uses memory of + size :math:`\mathcal{O}(n_{thresholds})` (constant memory). + + Note that outputted thresholds will be in reversed order to ensure that they corresponds to both fpr and tpr which + are sorted in reversed order during their calculation, such that they are monotome increasing. + + Args: + preds: Tensor with predictions + target: Tensor with true labels + thresholds: + Can be one of: + + - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from + all the data. Most accurate but also most memory consuming approach. + - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from + 0 to 1 as bins for the calculation. + - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation + - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as + bins for the calculation. + + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + (tuple): a tuple of 3 tensors containing: + + - fpr: an 1d tensor of size (n_thresholds+1, ) with false positive rate values + - tpr: an 1d tensor of size (n_thresholds+1, ) with true positive rate values + - thresholds: an 1d tensor of size (n_thresholds, ) with decreasing threshold values + + Example: + >>> from torchmetrics.functional.classification import binary_roc + >>> preds = torch.tensor([0, 0.5, 0.7, 0.8]) + >>> target = torch.tensor([0, 1, 1, 0]) + >>> binary_roc(preds, target, thresholds=None) # doctest: +NORMALIZE_WHITESPACE + (tensor([0.0000, 0.5000, 0.5000, 0.5000, 1.0000]), + tensor([0.0000, 0.0000, 0.5000, 1.0000, 1.0000]), + tensor([1.0000, 0.8000, 0.7000, 0.5000, 0.0000])) + >>> binary_roc(preds, target, thresholds=5) # doctest: +NORMALIZE_WHITESPACE + (tensor([0.0000, 0.5000, 0.5000, 0.5000, 1.0000]), + tensor([0., 0., 1., 1., 1.]), + tensor([1.0000, 0.7500, 0.5000, 0.2500, 0.0000])) + """ + if validate_args: + _binary_precision_recall_curve_arg_validation(thresholds, ignore_index) + _binary_precision_recall_curve_tensor_validation(preds, target, ignore_index) + preds, target, thresholds = _binary_precision_recall_curve_format(preds, target, thresholds, ignore_index) + state = _binary_precision_recall_curve_update(preds, target, thresholds) + return _binary_roc_compute(state, thresholds) + + +def _multiclass_roc_compute( + state: Union[Tensor, Tuple[Tensor, Tensor]], + num_classes: int, + thresholds: Optional[Tensor], +) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: + if isinstance(state, Tensor) and thresholds is not None: + tps = state[:, :, 1, 1] + fps = state[:, :, 0, 1] + fns = state[:, :, 1, 0] + tns = state[:, :, 0, 0] + tpr = _safe_divide(tps, tps + fns).flip(0).T + fpr = _safe_divide(fps, fps + tns).flip(0).T + thresholds = thresholds.flip(0) + else: + fpr, tpr, thresholds = [], [], [] + for i in range(num_classes): + res = _binary_roc_compute([state[0][:, i], state[1]], thresholds=None, pos_label=i) + fpr.append(res[0]) 
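+            # res = (fpr, tpr, thresholds) for class i treated one-vs-rest; results are
+            # collected in lists because curve lengths can differ between classes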
+ tpr.append(res[1]) + thresholds.append(res[2]) + return fpr, tpr, thresholds + + +def multiclass_roc( + preds: Tensor, + target: Tensor, + num_classes: int, + thresholds: Optional[Union[int, List[float], Tensor]] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: + r"""Computes the Receiver Operating Characteristic (ROC) for multiclass tasks. The curve consist of multiple + pairs of true positive rate (TPR) and false positive rate (FPR) values evaluated at different thresholds, such + that the tradeoff between the two values can be seen. + + Accepts the following input tensors: + + - ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each + observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply + softmax per sample. + - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore + only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified). + + Additional dimension ``...`` will be flattened into the batch dimension. + + The implementation both supports calculating the metric in a non-binned but accurate version and a binned version + that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the + non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds` + argument to either an integer, list or a 1d tensor will use a binned version that uses memory of + size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory). + + Note that outputted thresholds will be in reversed order to ensure that they corresponds to both fpr and tpr which + are sorted in reversed order during their calculation, such that they are monotome increasing. + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_classes: Integer specifing the number of classes + thresholds: + Can be one of: + + - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from + all the data. Most accurate but also most memory consuming approach. + - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from + 0 to 1 as bins for the calculation. + - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation + - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as + bins for the calculation. + + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + (tuple): a tuple of either 3 tensors or 3 lists containing + + - fpr: if `thresholds=None` a list for each class is returned with an 1d tensor of size (n_thresholds+1, ) + with false positive rate values (length may differ between classes). If `thresholds` is set to something else, + then a single 2d tensor of size (n_classes, n_thresholds+1) with false positive rate values is returned. + - tpr: if `thresholds=None` a list for each class is returned with an 1d tensor of size (n_thresholds+1, ) + with true positive rate values (length may differ between classes). 
If `thresholds` is set to something else, + then a single 2d tensor of size (n_classes, n_thresholds+1) with true positive rate values is returned. + - thresholds: if `thresholds=None` a list for each class is returned with an 1d tensor of size (n_thresholds, ) + with decreasing threshold values (length may differ between classes). If `threshold` is set to something else, + then a single 1d tensor of size (n_thresholds, ) is returned with shared threshold values for all classes. + + Example: + >>> from torchmetrics.functional.classification import multiclass_roc + >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.05, 0.75, 0.05]]) + >>> target = torch.tensor([0, 1, 3, 2]) + >>> fpr, tpr, thresholds = multiclass_roc( + ... preds, target, num_classes=5, thresholds=None + ... ) + >>> fpr # doctest: +NORMALIZE_WHITESPACE + [tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]), + tensor([0.0000, 0.3333, 1.0000]), tensor([0., 1.])] + >>> tpr + [tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0., 0.])] + >>> thresholds # doctest: +NORMALIZE_WHITESPACE + [tensor([1.0000, 0.7500, 0.0500]), tensor([1.0000, 0.7500, 0.0500]), + tensor([1.0000, 0.7500, 0.0500]), tensor([1.0000, 0.7500, 0.0500]), tensor([1.0000, 0.0500])] + >>> multiclass_roc( + ... preds, target, num_classes=5, thresholds=5 + ... ) # doctest: +NORMALIZE_WHITESPACE + (tensor([[0.0000, 0.0000, 0.0000, 0.0000, 1.0000], + [0.0000, 0.0000, 0.0000, 0.0000, 1.0000], + [0.0000, 0.3333, 0.3333, 0.3333, 1.0000], + [0.0000, 0.3333, 0.3333, 0.3333, 1.0000], + [0.0000, 0.0000, 0.0000, 0.0000, 1.0000]]), + tensor([[0., 1., 1., 1., 1.], + [0., 1., 1., 1., 1.], + [0., 0., 0., 0., 1.], + [0., 0., 0., 0., 1.], + [0., 0., 0., 0., 0.]]), + tensor([1.0000, 0.7500, 0.5000, 0.2500, 0.0000])) + """ + if validate_args: + _multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index) + _multiclass_precision_recall_curve_tensor_validation(preds, target, num_classes, ignore_index) + preds, target, thresholds = _multiclass_precision_recall_curve_format( + preds, target, num_classes, thresholds, ignore_index + ) + state = _multiclass_precision_recall_curve_update(preds, target, num_classes, thresholds) + return _multiclass_roc_compute(state, num_classes, thresholds) + + +def _multilabel_roc_compute( + state: Union[Tensor, Tuple[Tensor, Tensor]], + num_labels: int, + thresholds: Optional[Tensor], + ignore_index: Optional[int] = None, +) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: + if isinstance(state, Tensor) and thresholds is not None: + tps = state[:, :, 1, 1] + fps = state[:, :, 0, 1] + fns = state[:, :, 1, 0] + tns = state[:, :, 0, 0] + tpr = _safe_divide(tps, tps + fns).flip(0).T + fpr = _safe_divide(fps, fps + tns).flip(0).T + thresholds = thresholds.flip(0) + else: + fpr, tpr, thresholds = [], [], [] + for i in range(num_labels): + preds = state[0][:, i] + target = state[1][:, i] + if ignore_index is not None: + idx = target == ignore_index + preds = preds[~idx] + target = target[~idx] + res = _binary_roc_compute([preds, target], thresholds=None, pos_label=1) + fpr.append(res[0]) + tpr.append(res[1]) + thresholds.append(res[2]) + return fpr, tpr, thresholds + + +def multilabel_roc( + preds: Tensor, + target: Tensor, + num_labels: int, + thresholds: Optional[Union[int, List[float], Tensor]] = None, + ignore_index: 
Optional[int] = None, + validate_args: bool = True, +) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: + r"""Computes the Receiver Operating Characteristic (ROC) for multilabel tasks. The curve consist of multiple + pairs of true positive rate (TPR) and false positive rate (FPR) values evaluated at different thresholds, such + that the tradeoff between the two values can be seen. + + Accepts the following input tensors: + + - ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each + observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply + sigmoid per element. + - ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore + only contain {0,1} values (except if `ignore_index` is specified). + + Additional dimension ``...`` will be flattened into the batch dimension. + + The implementation both supports calculating the metric in a non-binned but accurate version and a binned version + that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the + non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds` + argument to either an integer, list or a 1d tensor will use a binned version that uses memory of + size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory). + + Note that outputted thresholds will be in reversed order to ensure that they corresponds to both fpr and tpr which + are sorted in reversed order during their calculation, such that they are monotome increasing. + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_labels: Integer specifing the number of labels + thresholds: + Can be one of: + + - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from + all the data. Most accurate but also most memory consuming approach. + - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from + 0 to 1 as bins for the calculation. + - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation + - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as + bins for the calculation. + + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + (tuple): a tuple of either 3 tensors or 3 lists containing + + - fpr: if `thresholds=None` a list for each label is returned with an 1d tensor of size (n_thresholds+1, ) + with false positive rate values (length may differ between labels). If `thresholds` is set to something else, + then a single 2d tensor of size (n_labels, n_thresholds+1) with false positive rate values is returned. + - tpr: if `thresholds=None` a list for each label is returned with an 1d tensor of size (n_thresholds+1, ) + with true positive rate values (length may differ between labels). If `thresholds` is set to something else, + then a single 2d tensor of size (n_labels, n_thresholds+1) with true positive rate values is returned. + - thresholds: if `thresholds=None` a list for each label is returned with an 1d tensor of size (n_thresholds, ) + with decreasing threshold values (length may differ between labels). 
If `threshold` is set to something else, + then a single 1d tensor of size (n_thresholds, ) is returned with shared threshold values for all labels. + + Example: + >>> from torchmetrics.functional.classification import multilabel_roc + >>> preds = torch.tensor([[0.75, 0.05, 0.35], + ... [0.45, 0.75, 0.05], + ... [0.05, 0.55, 0.75], + ... [0.05, 0.65, 0.05]]) + >>> target = torch.tensor([[1, 0, 1], + ... [0, 0, 0], + ... [0, 1, 1], + ... [1, 1, 1]]) + >>> fpr, tpr, thresholds = multilabel_roc( + ... preds, target, num_labels=3, thresholds=None + ... ) + >>> fpr # doctest: +NORMALIZE_WHITESPACE + [tensor([0.0000, 0.0000, 0.5000, 1.0000]), + tensor([0.0000, 0.5000, 0.5000, 0.5000, 1.0000]), + tensor([0., 0., 0., 1.])] + >>> tpr # doctest: +NORMALIZE_WHITESPACE + [tensor([0.0000, 0.5000, 0.5000, 1.0000]), + tensor([0.0000, 0.0000, 0.5000, 1.0000, 1.0000]), + tensor([0.0000, 0.3333, 0.6667, 1.0000])] + >>> thresholds # doctest: +NORMALIZE_WHITESPACE + [tensor([1.0000, 0.7500, 0.4500, 0.0500]), + tensor([1.0000, 0.7500, 0.6500, 0.5500, 0.0500]), + tensor([1.0000, 0.7500, 0.3500, 0.0500])] + >>> multilabel_roc( + ... preds, target, num_labels=3, thresholds=5 + ... ) # doctest: +NORMALIZE_WHITESPACE + (tensor([[0.0000, 0.0000, 0.0000, 0.5000, 1.0000], + [0.0000, 0.5000, 0.5000, 0.5000, 1.0000], + [0.0000, 0.0000, 0.0000, 0.0000, 1.0000]]), + tensor([[0.0000, 0.5000, 0.5000, 0.5000, 1.0000], + [0.0000, 0.0000, 1.0000, 1.0000, 1.0000], + [0.0000, 0.3333, 0.3333, 0.6667, 1.0000]]), + tensor([1.0000, 0.7500, 0.5000, 0.2500, 0.0000])) + """ + if validate_args: + _multilabel_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index) + _multilabel_precision_recall_curve_tensor_validation(preds, target, num_labels, ignore_index) + preds, target, thresholds = _multilabel_precision_recall_curve_format( + preds, target, num_labels, thresholds, ignore_index + ) + state = _multilabel_precision_recall_curve_update(preds, target, num_labels, thresholds) + return _multilabel_roc_compute(state, num_labels, thresholds, ignore_index) + + +def roc( + preds: Tensor, + target: Tensor, + task: Literal["binary", "multiclass", "multilabel"], + thresholds: Optional[Union[int, List[float], Tensor]] = None, + num_classes: Optional[int] = None, + num_labels: Optional[int] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: + r"""Computes the Receiver Operating Characteristic (ROC). The curve consist of multiple pairs of true positive + rate (TPR) and false positive rate (FPR) values evaluated at different thresholds, such that the tradeoff + between the two values can be seen. + + This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the + ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of + :func:`binary_roc`, :func:`multiclass_roc` and :func:`multilabel_roc` for the specific details of each argument + influence and examples. + + Legacy Example: + >>> pred = torch.tensor([0.0, 1.0, 2.0, 3.0]) + >>> target = torch.tensor([0, 1, 1, 1]) + >>> fpr, tpr, thresholds = roc(pred, target, task='binary') + >>> fpr + tensor([0., 0., 0., 0., 1.]) + >>> tpr + tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000]) + >>> thresholds + tensor([1.0000, 0.9526, 0.8808, 0.7311, 0.5000]) + + >>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05], + ... [0.05, 0.75, 0.05, 0.05], + ... [0.05, 0.05, 0.75, 0.05], + ... 
[0.05, 0.05, 0.05, 0.75]]) + >>> target = torch.tensor([0, 1, 3, 2]) + >>> fpr, tpr, thresholds = roc(pred, target, task='multiclass', num_classes=4) + >>> fpr + [tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]), tensor([0.0000, 0.3333, 1.0000])] + >>> tpr + [tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.])] + >>> thresholds + [tensor([1.0000, 0.7500, 0.0500]), + tensor([1.0000, 0.7500, 0.0500]), + tensor([1.0000, 0.7500, 0.0500]), + tensor([1.0000, 0.7500, 0.0500])] + + >>> pred = torch.tensor([[0.8191, 0.3680, 0.1138], + ... [0.3584, 0.7576, 0.1183], + ... [0.2286, 0.3468, 0.1338], + ... [0.8603, 0.0745, 0.1837]]) + >>> target = torch.tensor([[1, 1, 0], [0, 1, 0], [0, 0, 0], [0, 1, 1]]) + >>> fpr, tpr, thresholds = roc(pred, target, task='multilabel', num_labels=3) + >>> fpr + [tensor([0.0000, 0.3333, 0.3333, 0.6667, 1.0000]), + tensor([0., 0., 0., 1., 1.]), + tensor([0.0000, 0.0000, 0.3333, 0.6667, 1.0000])] + >>> tpr + [tensor([0., 0., 1., 1., 1.]), tensor([0.0000, 0.3333, 0.6667, 0.6667, 1.0000]), tensor([0., 1., 1., 1., 1.])] + >>> thresholds + [tensor([1.0000, 0.8603, 0.8191, 0.3584, 0.2286]), + tensor([1.0000, 0.7576, 0.3680, 0.3468, 0.0745]), + tensor([1.0000, 0.1837, 0.1338, 0.1183, 0.1138])] + """ + if task == "binary": + return binary_roc(preds, target, thresholds, ignore_index, validate_args) + if task == "multiclass": + assert isinstance(num_classes, int) + return multiclass_roc(preds, target, num_classes, thresholds, ignore_index, validate_args) + if task == "multilabel": + assert isinstance(num_labels, int) + return multilabel_roc(preds, target, num_labels, thresholds, ignore_index, validate_args) + raise ValueError( + f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}" + ) diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/stat_scores.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/stat_scores.py new file mode 100644 index 0000000000000000000000000000000000000000..f4ada0e594a2a00bdec50afcded996b1807187f8 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/stat_scores.py @@ -0,0 +1,1117 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import List, Optional, Tuple, Union + +import torch +from torch import Tensor, tensor +from typing_extensions import Literal + +from torchmetrics.utilities.checks import _check_same_shape, _input_format_classification +from torchmetrics.utilities.data import _bincount, select_topk +from torchmetrics.utilities.enums import AverageMethod, DataType, MDMCAverageMethod + + +def _binary_stat_scores_arg_validation( + threshold: float = 0.5, + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, +) -> None: + """Validate non tensor input. 
+
+    - ``threshold`` has to be a float in the [0,1] range
+    - ``multidim_average`` has to be either "global" or "samplewise"
+    - ``ignore_index`` has to be None or int
+    """
+    if not (isinstance(threshold, float) and (0 <= threshold <= 1)):
+        raise ValueError(f"Expected argument `threshold` to be a float in the [0,1] range, but got {threshold}.")
+    allowed_multidim_average = ("global", "samplewise")
+    if multidim_average not in allowed_multidim_average:
+        raise ValueError(
+            f"Expected argument `multidim_average` to be one of {allowed_multidim_average}, but got {multidim_average}"
+        )
+    if ignore_index is not None and not isinstance(ignore_index, int):
+        raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
+
+
+def _binary_stat_scores_tensor_validation(
+    preds: Tensor,
+    target: Tensor,
+    multidim_average: Literal["global", "samplewise"] = "global",
+    ignore_index: Optional[int] = None,
+) -> None:
+    """Validate tensor input.
+
+    - tensors have to be of same shape
+    - all values in target tensor that are not ignored have to be in {0, 1}
+    - if pred tensor is not floating point, then all values also have to be in {0, 1}
+    - if ``multidim_average`` is set to ``samplewise`` preds tensor needs to be at least 2-dimensional
+    """
+    # Check that they have same shape
+    _check_same_shape(preds, target)
+
+    # Check that target only contains [0,1] values or value in ignore_index
+    unique_values = torch.unique(target)
+    if ignore_index is None:
+        check = torch.any((unique_values != 0) & (unique_values != 1))
+    else:
+        check = torch.any((unique_values != 0) & (unique_values != 1) & (unique_values != ignore_index))
+    if check:
+        raise RuntimeError(
+            f"Detected the following values in `target`: {unique_values} but expected only"
+            f" the following values {[0, 1] if ignore_index is None else [0, 1, ignore_index]}."
+        )
+
+    # If preds is label tensor, also check that it only contains [0,1] values
+    if not preds.is_floating_point():
+        unique_values = torch.unique(preds)
+        if torch.any((unique_values != 0) & (unique_values != 1)):
+            raise RuntimeError(
+                f"Detected the following values in `preds`: {unique_values} but expected only"
+                " the following values [0,1] since `preds` is a label tensor."
+            )
+
+    if multidim_average != "global" and preds.ndim < 2:
+        raise ValueError("Expected input to be at least 2D when multidim_average is set to `samplewise`")
+
+
+def _binary_stat_scores_format(
+    preds: Tensor,
+    target: Tensor,
+    threshold: float = 0.5,
+    ignore_index: Optional[int] = None,
+) -> Tuple[Tensor, Tensor]:
+    """Convert all input to label format.
+ + - If preds tensor is floating point, applies sigmoid if pred tensor not in [0,1] range + - If preds tensor is floating point, thresholds afterwards + - Mask all datapoints that should be ignored with negative values + """ + if preds.is_floating_point(): + if not torch.all((0 <= preds) * (preds <= 1)): + # preds is logits, convert with sigmoid + preds = preds.sigmoid() + preds = preds > threshold + + preds = preds.reshape(preds.shape[0], -1) + target = target.reshape(target.shape[0], -1) + + if ignore_index is not None: + idx = target == ignore_index + target = target.clone() + target[idx] = -1 + + return preds, target + + +def _binary_stat_scores_update( + preds: Tensor, + target: Tensor, + multidim_average: Literal["global", "samplewise"] = "global", +) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + """Computes the statistics.""" + sum_dim = [0, 1] if multidim_average == "global" else 1 + tp = ((target == preds) & (target == 1)).sum(sum_dim).squeeze() + fn = ((target != preds) & (target == 1)).sum(sum_dim).squeeze() + fp = ((target != preds) & (target == 0)).sum(sum_dim).squeeze() + tn = ((target == preds) & (target == 0)).sum(sum_dim).squeeze() + return tp, fp, tn, fn + + +def _binary_stat_scores_compute( + tp: Tensor, fp: Tensor, tn: Tensor, fn: Tensor, multidim_average: Literal["global", "samplewise"] = "global" +) -> Tensor: + """Stack statistics and compute support also.""" + return torch.stack([tp, fp, tn, fn, tp + fn], dim=0 if multidim_average == "global" else 1).squeeze() + + +def binary_stat_scores( + preds: Tensor, + target: Tensor, + threshold: float = 0.5, + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes the number of true positives, false positives, true negatives, false negatives and the support for + binary tasks. Related to `Type I and Type II errors`_. + + Accepts the following input tensors: + + - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (int tensor): ``(N, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + threshold: Threshold for transforming probability to binary {0,1} predictions + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + The metric returns a tensor of shape ``(..., 5)``, where the last dimension corresponds + to ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals ``tp + fn``). 
The shape + depends on the ``multidim_average`` parameter: + + - If ``multidim_average`` is set to ``global``, the shape will be ``(5,)`` + - If ``multidim_average`` is set to ``samplewise``, the shape will be ``(N, 5)`` + + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import binary_stat_scores + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0, 0, 1, 1, 0, 1]) + >>> binary_stat_scores(preds, target) + tensor([2, 1, 2, 1, 3]) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import binary_stat_scores + >>> target = torch.tensor([0, 1, 0, 1, 0, 1]) + >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92]) + >>> binary_stat_scores(preds, target) + tensor([2, 1, 2, 1, 3]) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import binary_stat_scores + >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]]) + >>> preds = torch.tensor( + ... [ + ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]], + ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]], + ... ] + ... ) + >>> binary_stat_scores(preds, target, multidim_average='samplewise') + tensor([[2, 3, 0, 1, 3], + [0, 2, 1, 3, 3]]) + """ + if validate_args: + _binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index) + _binary_stat_scores_tensor_validation(preds, target, multidim_average, ignore_index) + preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index) + tp, fp, tn, fn = _binary_stat_scores_update(preds, target, multidim_average) + return _binary_stat_scores_compute(tp, fp, tn, fn, multidim_average) + + +def _multiclass_stat_scores_arg_validation( + num_classes: int, + top_k: int = 1, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, +) -> None: + """Validate non tensor input. 
+
+    - ``num_classes`` has to be an int larger than 1
+    - ``top_k`` has to be an int larger than 0 but no larger than the number of classes
+    - ``average`` has to be "micro" | "macro" | "weighted" | "none"
+    - ``multidim_average`` has to be either "global" or "samplewise"
+    - ``ignore_index`` has to be None or int
+    """
+    if not isinstance(num_classes, int) or num_classes < 2:
+        raise ValueError(f"Expected argument `num_classes` to be an integer larger than 1, but got {num_classes}")
+    if not isinstance(top_k, int) or top_k < 1:
+        raise ValueError(f"Expected argument `top_k` to be an integer larger than or equal to 1, but got {top_k}")
+    if top_k > num_classes:
+        raise ValueError(
+            f"Expected argument `top_k` to be smaller or equal to `num_classes` but got {top_k} and {num_classes}"
+        )
+    allowed_average = ("micro", "macro", "weighted", "none", None)
+    if average not in allowed_average:
+        raise ValueError(f"Expected argument `average` to be one of {allowed_average}, but got {average}")
+    allowed_multidim_average = ("global", "samplewise")
+    if multidim_average not in allowed_multidim_average:
+        raise ValueError(
+            f"Expected argument `multidim_average` to be one of {allowed_multidim_average}, but got {multidim_average}"
+        )
+    if ignore_index is not None and not isinstance(ignore_index, int):
+        raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
+
+
+def _multiclass_stat_scores_tensor_validation(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: int,
+    multidim_average: Literal["global", "samplewise"] = "global",
+    ignore_index: Optional[int] = None,
+) -> None:
+    """Validate tensor input.
+
+    - if preds has one more dimension than target, then all dimensions except for preds.shape[1] should match
+      exactly. preds.shape[1] should have size equal to the number of classes
+    - if preds and target have same number of dims, then all dimensions should match
+    - if ``multidim_average`` is set to ``samplewise`` preds tensor needs to be at least 2-dimensional in the
+      int case and 3-dimensional in the float case
+    - all values in target tensor that are not ignored have to be {0, ..., num_classes - 1}
+    - if pred tensor is not floating point, then all values also have to be in {0, ..., num_classes - 1}
+    """
+    if preds.ndim == target.ndim + 1:
+        if not preds.is_floating_point():
+            raise ValueError("If `preds` has one dimension more than `target`, `preds` should be a float tensor.")
+        if preds.shape[1] != num_classes:
+            raise ValueError(
+                "If `preds` has one dimension more than `target`, `preds.shape[1]` should be"
+                " equal to the number of classes."
+            )
+        if preds.shape[2:] != target.shape[1:]:
+            raise ValueError(
+                "If `preds` has one dimension more than `target`, the shape of `preds` should be"
+                " (N, C, ...), and the shape of `target` should be (N, ...)."
+            )
+        if multidim_average != "global" and preds.ndim < 3:
+            raise ValueError(
+                "If `preds` has one dimension more than `target`, the shape of `preds` should be"
+                " at least 3D when multidim_average is set to `samplewise`"
+            )
+
+    elif preds.ndim == target.ndim:
+        if preds.shape != target.shape:
+            raise ValueError(
+                "The `preds` and `target` should have the same shape,"
+                f" got `preds` with shape={preds.shape} and `target` with shape={target.shape}."
+            )
+        if multidim_average != "global" and preds.ndim < 2:
+            raise ValueError(
+                "When `preds` and `target` have the same shape, the shape of `preds` should be"
+                " at least 2D when multidim_average is set to `samplewise`"
+            )
+    else:
+        raise ValueError(
+            "Either `preds` and `target` both should have the (same) shape (N, ...), or `target` should be (N, ...)"
+            " and `preds` should be (N, C, ...)."
+        )
+
+    num_unique_values = len(torch.unique(target))
+    if ignore_index is None:
+        check = num_unique_values > num_classes
+    else:
+        check = num_unique_values > num_classes + 1
+    if check:
+        raise RuntimeError(
+            "Detected more unique values in `target` than `num_classes`. Expected only "
+            f"{num_classes if ignore_index is None else num_classes + 1} but found"
+            f" {num_unique_values} in `target`."
+        )
+
+    if not preds.is_floating_point():
+        unique_values = torch.unique(preds)
+        if len(unique_values) > num_classes:
+            raise RuntimeError(
+                "Detected more unique values in `preds` than `num_classes`. Expected only "
+                f"{num_classes} but found {len(unique_values)} in `preds`."
+            )
+
+
+def _multiclass_stat_scores_format(
+    preds: Tensor,
+    target: Tensor,
+    top_k: int = 1,
+) -> Tuple[Tensor, Tensor]:
+    """Convert all input to label format except if ``top_k`` is not 1.
+
+    - Applies argmax if preds has one more dimension than target
+    - Flattens additional dimensions
+    """
+    # Apply argmax if we have one more dimension
+    if preds.ndim == target.ndim + 1 and top_k == 1:
+        preds = preds.argmax(dim=1)
+    if top_k != 1:
+        preds = preds.reshape(*preds.shape[:2], -1)
+    else:
+        preds = preds.reshape(preds.shape[0], -1)
+    target = target.reshape(target.shape[0], -1)
+    return preds, target
+
+
+def _multiclass_stat_scores_update(
+    preds: Tensor,
+    target: Tensor,
+    num_classes: int,
+    top_k: int = 1,
+    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
+    multidim_average: Literal["global", "samplewise"] = "global",
+    ignore_index: Optional[int] = None,
+) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+    """Computes the statistics.
+
+    - If ``multidim_average`` is equal to samplewise or ``top_k`` is not 1, we transform both preds and
+      target into one hot format.
+    - Else we calculate statistics by first calculating the confusion matrix and afterwards deriving the
+      statistics from that
+    - Remove all datapoints that should be ignored. Depending on whether ``ignore_index`` is inside or outside
+      the set of labels, we have to use different augmentation strategies when one-hot encoding.
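+
+    A sketch of the out-of-range ``ignore_index`` handling used below (illustrative only,
+    not the exact code path): ignored entries are first parked in an extra class so that
+    one-hot encoding stays valid, and the extra column is dropped again afterwards::
+
+        target = target.clone()
+        target[target == ignore_index] = num_classes             # park ignored entries
+        target_oh = one_hot(target, num_classes + 1)[..., :-1]   # drop the parking column
+        target_oh[target == num_classes, :] = -1                 # mark ignored positions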
+ """ + if multidim_average == "samplewise" or top_k != 1: + ignore_in = 0 <= ignore_index <= num_classes - 1 if ignore_index is not None else None + if ignore_index is not None and not ignore_in: + preds = preds.clone() + target = target.clone() + idx = target == ignore_index + target[idx] = num_classes + idx = idx.unsqueeze(1).repeat(1, num_classes, 1) if preds.ndim > target.ndim else idx + preds[idx] = num_classes + + if top_k > 1: + preds_oh = torch.movedim(select_topk(preds, topk=top_k, dim=1), 1, -1) + else: + preds_oh = torch.nn.functional.one_hot( + preds, num_classes + 1 if ignore_index is not None and not ignore_in else num_classes + ) + target_oh = torch.nn.functional.one_hot( + target, num_classes + 1 if ignore_index is not None and not ignore_in else num_classes + ) + if ignore_index is not None: + if 0 <= ignore_index <= num_classes - 1: + target_oh[target == ignore_index, :] = -1 + else: + preds_oh = preds_oh[..., :-1] if top_k == 1 else preds_oh + target_oh = target_oh[..., :-1] + target_oh[target == num_classes, :] = -1 + sum_dim = [0, 1] if multidim_average == "global" else [1] + tp = ((target_oh == preds_oh) & (target_oh == 1)).sum(sum_dim) + fn = ((target_oh != preds_oh) & (target_oh == 1)).sum(sum_dim) + fp = ((target_oh != preds_oh) & (target_oh == 0)).sum(sum_dim) + tn = ((target_oh == preds_oh) & (target_oh == 0)).sum(sum_dim) + elif average == "micro": + preds = preds.flatten() + target = target.flatten() + if ignore_index is not None: + idx = target != ignore_index + preds = preds[idx] + target = target[idx] + tp = (preds == target).sum() + fp = (preds != target).sum() + fn = (preds != target).sum() + tn = num_classes * preds.numel() - (fp + fn + tp) + else: + preds = preds.flatten() + target = target.flatten() + if ignore_index is not None: + idx = target != ignore_index + preds = preds[idx] + target = target[idx] + unique_mapping = target.to(torch.long) * num_classes + preds.to(torch.long) + bins = _bincount(unique_mapping, minlength=num_classes**2) + confmat = bins.reshape(num_classes, num_classes) + tp = confmat.diag() + fp = confmat.sum(0) - tp + fn = confmat.sum(1) - tp + tn = confmat.sum() - (fp + fn + tp) + return tp, fp, tn, fn + + +def _multiclass_stat_scores_compute( + tp: Tensor, + fp: Tensor, + tn: Tensor, + fn: Tensor, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + multidim_average: Literal["global", "samplewise"] = "global", +) -> Tensor: + """Stack statistics and compute support also. + + Applies average strategy afterwards. 
+ """ + res = torch.stack([tp, fp, tn, fn, tp + fn], dim=-1) + sum_dim = 0 if multidim_average == "global" else 1 + if average == "micro": + return res.sum(sum_dim) if res.ndim > 1 else res + if average == "macro": + return res.float().mean(sum_dim) + elif average == "weighted": + weight = tp + fn + if multidim_average == "global": + return (res * (weight / weight.sum()).reshape(*weight.shape, 1)).sum(sum_dim) + else: + return (res * (weight / weight.sum(-1, keepdim=True)).reshape(*weight.shape, 1)).sum(sum_dim) + elif average is None or average == "none": + return res + + +def multiclass_stat_scores( + preds: Tensor, + target: Tensor, + num_classes: int, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + top_k: int = 1, + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes the number of true positives, false positives, true negatives, false negatives and the support for + multiclass tasks. Related to `Type I and Type II errors`_. + + Accepts the following input tensors: + + - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point + we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into + an int tensor. + - ``target`` (int tensor): ``(N, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_classes: Integer specifing the number of classes + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + top_k: + Number of highest probability or logit score predictions considered to find the correct label. + Only works when ``preds`` contain probabilities/logits. + multidim_average: + Defines how additionally dimensions ``...`` should be handled. Should be one of the following: + + - ``global``: Additional dimensions are flatted along the batch dimension + - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis. + The statistics in this case are calculated over the additional dimensions. + + ignore_index: + Specifies a target value that is ignored and does not contribute to the metric calculation + validate_args: bool indicating if input arguments and tensors should be validated for correctness. + Set to ``False`` for faster computations. + + Returns: + The metric returns a tensor of shape ``(..., 5)``, where the last dimension corresponds + to ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals ``tp + fn``). 
The shape + depends on ``average`` and ``multidim_average`` parameters: + + - If ``multidim_average`` is set to ``global``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(5,)`` + - If ``average=None/'none'``, the shape will be ``(C, 5)`` + + - If ``multidim_average`` is set to ``samplewise``: + + - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N, 5)`` + - If ``average=None/'none'``, the shape will be ``(N, C, 5)`` + + Example (preds is int tensor): + >>> from torchmetrics.functional.classification import multiclass_stat_scores + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([2, 1, 0, 1]) + >>> multiclass_stat_scores(preds, target, num_classes=3, average='micro') + tensor([3, 1, 7, 1, 4]) + >>> multiclass_stat_scores(preds, target, num_classes=3, average=None) + tensor([[1, 0, 2, 1, 2], + [1, 1, 2, 0, 1], + [1, 0, 3, 0, 1]]) + + Example (preds is float tensor): + >>> from torchmetrics.functional.classification import multiclass_stat_scores + >>> target = torch.tensor([2, 1, 0, 0]) + >>> preds = torch.tensor([ + ... [0.16, 0.26, 0.58], + ... [0.22, 0.61, 0.17], + ... [0.71, 0.09, 0.20], + ... [0.05, 0.82, 0.13], + ... ]) + >>> multiclass_stat_scores(preds, target, num_classes=3, average='micro') + tensor([3, 1, 7, 1, 4]) + >>> multiclass_stat_scores(preds, target, num_classes=3, average=None) + tensor([[1, 0, 2, 1, 2], + [1, 1, 2, 0, 1], + [1, 0, 3, 0, 1]]) + + Example (multidim tensors): + >>> from torchmetrics.functional.classification import multiclass_stat_scores + >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]]) + >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]]) + >>> multiclass_stat_scores(preds, target, num_classes=3, multidim_average='samplewise', average='micro') + tensor([[3, 3, 9, 3, 6], + [2, 4, 8, 4, 6]]) + >>> multiclass_stat_scores(preds, target, num_classes=3, multidim_average='samplewise', average=None) + tensor([[[2, 1, 3, 0, 2], + [0, 1, 3, 2, 2], + [1, 1, 3, 1, 2]], + [[0, 1, 4, 1, 1], + [1, 1, 2, 2, 3], + [1, 2, 2, 1, 2]]]) + """ + if validate_args: + _multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index) + _multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index) + preds, target = _multiclass_stat_scores_format(preds, target, top_k) + tp, fp, tn, fn = _multiclass_stat_scores_update( + preds, target, num_classes, top_k, average, multidim_average, ignore_index + ) + return _multiclass_stat_scores_compute(tp, fp, tn, fn, average, multidim_average) + + +def _multilabel_stat_scores_arg_validation( + num_labels: int, + threshold: float = 0.5, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, +) -> None: + """Validate non tensor input. 
+
+    - ``num_labels`` should be an int larger than 1
+    - ``threshold`` has to be a float in the [0,1] range
+    - ``average`` has to be "micro" | "macro" | "weighted" | "none"
+    - ``multidim_average`` has to be either "global" or "samplewise"
+    - ``ignore_index`` has to be None or int
+    """
+    if not isinstance(num_labels, int) or num_labels < 2:
+        raise ValueError(f"Expected argument `num_labels` to be an integer larger than 1, but got {num_labels}")
+    if not (isinstance(threshold, float) and (0 <= threshold <= 1)):
+        raise ValueError(f"Expected argument `threshold` to be a float in the [0,1] range, but got {threshold}.")
+    allowed_average = ("micro", "macro", "weighted", "none", None)
+    if average not in allowed_average:
+        raise ValueError(f"Expected argument `average` to be one of {allowed_average}, but got {average}")
+    allowed_multidim_average = ("global", "samplewise")
+    if multidim_average not in allowed_multidim_average:
+        raise ValueError(
+            f"Expected argument `multidim_average` to be one of {allowed_multidim_average}, but got {multidim_average}"
+        )
+    if ignore_index is not None and not isinstance(ignore_index, int):
+        raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
+
+
+def _multilabel_stat_scores_tensor_validation(
+    preds: Tensor,
+    target: Tensor,
+    num_labels: int,
+    multidim_average: str,
+    ignore_index: Optional[int] = None,
+) -> None:
+    """Validate tensor input.
+
+    - tensors have to be of same shape
+    - the second dimension of both tensors needs to be equal to the number of labels
+    - all values in target tensor that are not ignored have to be in {0, 1}
+    - if pred tensor is not floating point, then all values also have to be in {0, 1}
+    - if ``multidim_average`` is set to ``samplewise`` preds tensor needs to be at least 3-dimensional
+    """
+    # Check that they have same shape
+    _check_same_shape(preds, target)
+
+    if preds.shape[1] != num_labels:
+        raise ValueError(
+            "Expected both `target.shape[1]` and `preds.shape[1]` to be equal to the number of labels"
+            f" but got {preds.shape[1]} and expected {num_labels}"
+        )
+
+    # Check that target only contains [0,1] values or value in ignore_index
+    unique_values = torch.unique(target)
+    if ignore_index is None:
+        check = torch.any((unique_values != 0) & (unique_values != 1))
+    else:
+        check = torch.any((unique_values != 0) & (unique_values != 1) & (unique_values != ignore_index))
+    if check:
+        raise RuntimeError(
+            f"Detected the following values in `target`: {unique_values} but expected only"
+            f" the following values {[0, 1] if ignore_index is None else [0, 1, ignore_index]}."
+        )
+
+    # If preds is label tensor, also check that it only contains [0,1] values
+    if not preds.is_floating_point():
+        unique_values = torch.unique(preds)
+        if torch.any((unique_values != 0) & (unique_values != 1)):
+            raise RuntimeError(
+                f"Detected the following values in `preds`: {unique_values} but expected only"
+                " the following values [0,1] since preds is a label tensor."
+            )
+
+    if multidim_average != "global" and preds.ndim < 3:
+        raise ValueError("Expected input to be at least 3D when multidim_average is set to `samplewise`")
+
+
+def _multilabel_stat_scores_format(
+    preds: Tensor, target: Tensor, num_labels: int, threshold: float = 0.5, ignore_index: Optional[int] = None
+) -> Tuple[Tensor, Tensor]:
+    """Convert all input to label format.
+ + - If preds tensor is floating point, applies sigmoid if pred tensor not in [0,1] range + - If preds tensor is floating point, thresholds afterwards + - Mask all elements that should be ignored with negative numbers for later filtration + """ + if preds.is_floating_point(): + if not torch.all((0 <= preds) * (preds <= 1)): + preds = preds.sigmoid() + preds = preds > threshold + preds = preds.reshape(*preds.shape[:2], -1) + target = target.reshape(*target.shape[:2], -1) + + if ignore_index is not None: + idx = target == ignore_index + target = target.clone() + target[idx] = -1 + + return preds, target + + +def _multilabel_stat_scores_update( + preds: Tensor, target: Tensor, multidim_average: Literal["global", "samplewise"] = "global" +) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + """Computes the statistics.""" + sum_dim = [0, -1] if multidim_average == "global" else [-1] + tp = ((target == preds) & (target == 1)).sum(sum_dim).squeeze() + fn = ((target != preds) & (target == 1)).sum(sum_dim).squeeze() + fp = ((target != preds) & (target == 0)).sum(sum_dim).squeeze() + tn = ((target == preds) & (target == 0)).sum(sum_dim).squeeze() + return tp, fp, tn, fn + + +def _multilabel_stat_scores_compute( + tp: Tensor, + fp: Tensor, + tn: Tensor, + fn: Tensor, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + multidim_average: Literal["global", "samplewise"] = "global", +) -> Tensor: + """Stack statistics and compute support also. + + Applies average strategy afterwards. + """ + res = torch.stack([tp, fp, tn, fn, tp + fn], dim=-1) + sum_dim = 0 if multidim_average == "global" else 1 + if average == "micro": + return res.sum(sum_dim) + elif average == "macro": + return res.float().mean(sum_dim) + elif average == "weighted": + w = tp + fn + return (res * (w / w.sum()).reshape(*w.shape, 1)).sum(sum_dim) + elif average is None or average == "none": + return res + + +def multilabel_stat_scores( + preds: Tensor, + target: Tensor, + num_labels: int, + threshold: float = 0.5, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro", + multidim_average: Literal["global", "samplewise"] = "global", + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes the number of true positives, false positives, true negatives, false negatives and the support for + multilabel tasks. Related to `Type I and Type II errors`_. + + Accepts the following input tensors: + + - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside + [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally, + we convert to int tensor with thresholding using the value in ``threshold``. + - ``target`` (int tensor): ``(N, C, ...)`` + + Args: + preds: Tensor with predictions + target: Tensor with true labels + num_labels: Integer specifing the number of labels + threshold: Threshold for transforming probability to binary (0,1) predictions + average: + Defines the reduction that is applied over labels. Should be one of the following: + + - ``micro``: Sum statistics over all labels + - ``macro``: Calculate statistics for each label and average them + - ``weighted``: Calculates statistics for each label and computes weighted average using their support + - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction + + multidim_average: + Defines how additionally dimensions ``...`` should be handled. 
+            Should be one of the following:
+
+            - ``global``: Additional dimensions are flattened along the batch dimension
+            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
+              The statistics in this case are calculated over the additional dimensions.
+
+        ignore_index:
+            Specifies a target value that is ignored and does not contribute to the metric calculation
+        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
+            Set to ``False`` for faster computations.
+
+    Returns:
+        The metric returns a tensor of shape ``(..., 5)``, where the last dimension corresponds
+        to ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals ``tp + fn``). The shape
+        depends on the ``average`` and ``multidim_average`` parameters:
+
+        - If ``multidim_average`` is set to ``global``:
+
+          - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(5,)``
+          - If ``average=None/'none'``, the shape will be ``(C, 5)``
+
+        - If ``multidim_average`` is set to ``samplewise``:
+
+          - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N, 5)``
+          - If ``average=None/'none'``, the shape will be ``(N, C, 5)``
+
+    Example (preds is int tensor):
+        >>> from torchmetrics.functional.classification import multilabel_stat_scores
+        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
+        >>> multilabel_stat_scores(preds, target, num_labels=3, average='micro')
+        tensor([2, 1, 2, 1, 3])
+        >>> multilabel_stat_scores(preds, target, num_labels=3, average=None)
+        tensor([[1, 0, 1, 0, 1],
+                [0, 0, 1, 1, 1],
+                [1, 1, 0, 0, 1]])
+
+    Example (preds is float tensor):
+        >>> from torchmetrics.functional.classification import multilabel_stat_scores
+        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
+        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
+        >>> multilabel_stat_scores(preds, target, num_labels=3, average='micro')
+        tensor([2, 1, 2, 1, 3])
+        >>> multilabel_stat_scores(preds, target, num_labels=3, average=None)
+        tensor([[1, 0, 1, 0, 1],
+                [0, 0, 1, 1, 1],
+                [1, 1, 0, 0, 1]])
+
+    Example (multidim tensors):
+        >>> from torchmetrics.functional.classification import multilabel_stat_scores
+        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
+        >>> preds = torch.tensor(
+        ...     [
+        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
+        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
+        ...     ]
+        ... )
+        >>> multilabel_stat_scores(preds, target, num_labels=3, multidim_average='samplewise', average='micro')
+        tensor([[2, 3, 0, 1, 3],
+                [0, 2, 1, 3, 3]])
+        >>> multilabel_stat_scores(preds, target, num_labels=3, multidim_average='samplewise', average=None)
+        tensor([[[1, 1, 0, 0, 1],
+                 [1, 1, 0, 0, 1],
+                 [0, 1, 0, 1, 1]],
+                [[0, 0, 0, 2, 2],
+                 [0, 2, 0, 0, 0],
+                 [0, 0, 1, 1, 1]]])
+    """
+    if validate_args:
+        _multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index)
+        _multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index)
+    preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index)
+    tp, fp, tn, fn = _multilabel_stat_scores_update(preds, target, multidim_average)
+    return _multilabel_stat_scores_compute(tp, fp, tn, fn, average, multidim_average)
+
+
+def _del_column(data: Tensor, idx: int) -> Tensor:
+    """Delete the column at index."""
+    return torch.cat([data[:, :idx], data[:, (idx + 1) :]], 1)
+
+
+def _drop_negative_ignored_indices(
+    preds: Tensor, target: Tensor, ignore_index: int, mode: DataType
+) -> Tuple[Tensor, Tensor]:
+    """Remove negative ignored indices.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        ignore_index: Specify a class (label) to ignore. If given, this class index does not contribute
+            to the returned score, regardless of reduction method. If an index is ignored, and
+            ``reduce='macro'``, the class statistics for the ignored class will all be returned
+            as ``-1``.
+        mode: Mode of the input tensors
+
+    Return:
+        Tensors of preds and target without negative ignore target values.
+    """
+    if mode == mode.MULTIDIM_MULTICLASS and preds.dtype == torch.float:
+        # In case of multi-dimensional multi-class with logits
+        n_dims = len(preds.shape)
+        num_classes = preds.shape[1]
+        # move class dim to last so that we can flatten the additional dimensions into N: [N, C, ...] -> [N, ..., C]
+        preds = preds.transpose(1, n_dims - 1)
+
+        # flatten: [N, ..., C] -> [N', C]
+        preds = preds.reshape(-1, num_classes)
+        target = target.reshape(-1)
+
+    if mode in [mode.MULTICLASS, mode.MULTIDIM_MULTICLASS]:
+        preds = preds[target != ignore_index]
+        target = target[target != ignore_index]
+
+    return preds, target
+
+
+def _stat_scores(
+    preds: Tensor,
+    target: Tensor,
+    reduce: Optional[str] = "micro",
+) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+    """Calculate the number of tp, fp, tn, fn.
+
+    Args:
+        preds: An ``(N, C)`` or ``(N, C, X)`` tensor of predictions (0 or 1)
+        target: An ``(N, C)`` or ``(N, C, X)`` tensor of true labels (0 or 1)
+        reduce: One of ``'micro'``, ``'macro'``, ``'samples'``
+
+    Return:
+        Returns a list of 4 tensors; tp, fp, tn, fn.
+ The shape of the returned tensors depends on the shape of the inputs + and the ``reduce`` parameter: + + If inputs are of the shape ``(N, C)``, then: + + - If ``reduce='micro'``, the returned tensors are 1 element tensors + - If ``reduce='macro'``, the returned tensors are ``(C,)`` tensors + - If ``reduce='samples'``, the returned tensors are ``(N,)`` tensors + + If inputs are of the shape ``(N, C, X)``, then: + + - If ``reduce='micro'``, the returned tensors are ``(N,)`` tensors + - If ``reduce='macro'``, the returned tensors are ``(N,C)`` tensors + - If ``reduce='samples'``, the returned tensors are ``(N,X)`` tensors + """ + dim: Union[int, List[int]] = 1 # for "samples" + if reduce == "micro": + dim = [0, 1] if preds.ndim == 2 else [1, 2] + elif reduce == "macro": + dim = 0 if preds.ndim == 2 else 2 + + true_pred, false_pred = target == preds, target != preds + pos_pred, neg_pred = preds == 1, preds == 0 + + tp = (true_pred * pos_pred).sum(dim=dim) + fp = (false_pred * pos_pred).sum(dim=dim) + + tn = (true_pred * neg_pred).sum(dim=dim) + fn = (false_pred * neg_pred).sum(dim=dim) + + return tp.long(), fp.long(), tn.long(), fn.long() + + +def _stat_scores_update( + preds: Tensor, + target: Tensor, + reduce: Optional[str] = "micro", + mdmc_reduce: Optional[str] = None, + num_classes: Optional[int] = None, + top_k: Optional[int] = 1, + threshold: float = 0.5, + multiclass: Optional[bool] = None, + ignore_index: Optional[int] = None, + mode: DataType = None, +) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + """Updates and returns the number of true positives, false positives, true negatives, false negatives. Raises + ValueError if: + + - The `ignore_index` is not valid + - When `ignore_index` is used with binary data + - When inputs are multi-dimensional multi-class, and the ``mdmc_reduce`` parameter is not set + + Args: + preds: Predicted tensor + target: Ground truth tensor + reduce: Defines the reduction that is applied + mdmc_reduce: Defines how the multi-dimensional multi-class inputs are handled + num_classes: Number of classes. Necessary for (multi-dimensional) multi-class or multi-label data. + top_k: Number of the highest probability or logit score predictions considered finding the correct label, + relevant only for (multi-dimensional) multi-class inputs + threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case + of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities + multiclass: Used only in certain special cases, where you want to treat inputs as a different type + than what they appear to be + ignore_index: Specify a class (label) to ignore. If given, this class index does not contribute + to the returned score, regardless of reduction method. If an index is ignored, and + ``reduce='macro'``, the class statistics for the ignored class will all be returned + as ``-1``. 
+ mode: Mode of the input tensors + """ + + _negative_index_dropped = False + + if ignore_index is not None and ignore_index < 0 and mode is not None: + preds, target = _drop_negative_ignored_indices(preds, target, ignore_index, mode) + _negative_index_dropped = True + + preds, target, _ = _input_format_classification( + preds, + target, + threshold=threshold, + num_classes=num_classes, + multiclass=multiclass, + top_k=top_k, + ignore_index=ignore_index, + ) + + if ignore_index is not None and ignore_index >= preds.shape[1]: + raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {preds.shape[1]} classes") + + if ignore_index is not None and preds.shape[1] == 1: + raise ValueError("You can not use `ignore_index` with binary data.") + + if preds.ndim == 3: + if not mdmc_reduce: + raise ValueError( + "When your inputs are multi-dimensional multi-class, you have to set the `mdmc_reduce` parameter" + ) + if mdmc_reduce == "global": + preds = torch.transpose(preds, 1, 2).reshape(-1, preds.shape[1]) + target = torch.transpose(target, 1, 2).reshape(-1, target.shape[1]) + + # Delete what is in ignore_index, if applicable (and classes don't matter): + if ignore_index is not None and reduce != "macro" and not _negative_index_dropped: + preds = _del_column(preds, ignore_index) + target = _del_column(target, ignore_index) + + tp, fp, tn, fn = _stat_scores(preds, target, reduce=reduce) + + # Take care of ignore_index + if ignore_index is not None and reduce == "macro" and not _negative_index_dropped: + tp[..., ignore_index] = -1 + fp[..., ignore_index] = -1 + tn[..., ignore_index] = -1 + fn[..., ignore_index] = -1 + + return tp, fp, tn, fn + + +def _stat_scores_compute(tp: Tensor, fp: Tensor, tn: Tensor, fn: Tensor) -> Tensor: + """Computes the number of true positives, false positives, true negatives, false negatives. Concatenates the + input tensors along with the support into one output. + + Args: + tp: True positives + fp: False positives + tn: True negatives + fn: False negatives + """ + stats = [ + tp.unsqueeze(-1), + fp.unsqueeze(-1), + tn.unsqueeze(-1), + fn.unsqueeze(-1), + tp.unsqueeze(-1) + fn.unsqueeze(-1), # support + ] + outputs: Tensor = torch.cat(stats, -1) + outputs = torch.where(outputs < 0, tensor(-1, device=outputs.device), outputs) + + return outputs + + +def _reduce_stat_scores( + numerator: Tensor, + denominator: Tensor, + weights: Optional[Tensor], + average: Optional[str], + mdmc_average: Optional[str], + zero_division: int = 0, +) -> Tensor: + """Reduces scores of type ``numerator/denominator`` or. + + ``weights * (numerator/denominator)``, if ``average='weighted'``. + + Args: + numerator: A tensor with numerator numbers. + denominator: A tensor with denominator numbers. If a denominator is + negative, the class will be ignored (if averaging), or its score + will be returned as ``nan`` (if ``average=None``). + If the denominator is zero, then ``zero_division`` score will be + used for those elements. + weights: A tensor of weights to be used if ``average='weighted'``. + average: The method to average the scores + mdmc_average: The method to average the scores if inputs were multi-dimensional multi-class (MDMC) + zero_division: The value to use for the score if denominator equals zero. 
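+
+    Example (a minimal illustrative sketch of the ``'macro'`` reduction; the values are chosen purely for
+        illustration, with no ignored or zero-division entries):
+        >>> import torch
+        >>> num = torch.tensor([2.0, 1.0])
+        >>> den = torch.tensor([2.0, 2.0])
+        >>> _reduce_stat_scores(num, den, weights=None, average="macro", mdmc_average=None)
+        tensor(0.7500)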
+ """ + numerator, denominator = numerator.float(), denominator.float() + zero_div_mask = denominator == 0 + ignore_mask = denominator < 0 + + if weights is None: + weights = torch.ones_like(denominator) + else: + weights = weights.float() + + numerator = torch.where( + zero_div_mask, tensor(zero_division, dtype=numerator.dtype, device=numerator.device), numerator + ) + denominator = torch.where( + zero_div_mask | ignore_mask, tensor(1.0, dtype=denominator.dtype, device=denominator.device), denominator + ) + weights = torch.where(ignore_mask, tensor(0.0, dtype=weights.dtype, device=weights.device), weights) + + if average not in (AverageMethod.MICRO, AverageMethod.NONE, None): + weights = weights / weights.sum(dim=-1, keepdim=True) + + scores = weights * (numerator / denominator) + + # This is in case where sum(weights) = 0, which happens if we ignore the only present class with average='weighted' + scores = torch.where(torch.isnan(scores), tensor(zero_division, dtype=scores.dtype, device=scores.device), scores) + + if mdmc_average == MDMCAverageMethod.SAMPLEWISE: + scores = scores.mean(dim=0) + ignore_mask = ignore_mask.sum(dim=0).bool() + + if average in (AverageMethod.NONE, None): + scores = torch.where(ignore_mask, tensor(float("nan"), device=scores.device), scores) + else: + scores = scores.sum() + + return scores + + +def stat_scores( + preds: Tensor, + target: Tensor, + task: Literal["binary", "multiclass", "multilabel"], + threshold: float = 0.5, + num_classes: Optional[int] = None, + num_labels: Optional[int] = None, + average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro", + multidim_average: Optional[Literal["global", "samplewise"]] = "global", + top_k: Optional[int] = 1, + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Tensor: + r"""Computes the number of true positives, false positives, true negatives, false negatives and the support. + + This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the + ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of + :func:`binary_stat_scores`, :func:`multiclass_stat_scores` and :func:`multilabel_stat_scores` for the specific + details of each argument influence and examples. 
+ + Legacy Example: + >>> preds = torch.tensor([1, 0, 2, 1]) + >>> target = torch.tensor([1, 1, 2, 0]) + >>> stat_scores(preds, target, task='multiclass', num_classes=3, average='micro') + tensor([2, 2, 6, 2, 4]) + >>> stat_scores(preds, target, task='multiclass', num_classes=3, average=None) + tensor([[0, 1, 2, 1, 1], + [1, 1, 1, 1, 2], + [1, 0, 3, 0, 1]]) + """ + assert multidim_average is not None + if task == "binary": + return binary_stat_scores(preds, target, threshold, multidim_average, ignore_index, validate_args) + if task == "multiclass": + assert isinstance(num_classes, int) + assert isinstance(top_k, int) + return multiclass_stat_scores( + preds, target, num_classes, average, top_k, multidim_average, ignore_index, validate_args + ) + if task == "multilabel": + assert isinstance(num_labels, int) + return multilabel_stat_scores( + preds, target, num_labels, threshold, average, multidim_average, ignore_index, validate_args + ) + raise ValueError( + f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}" + ) diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/nominal/__init__.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/nominal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c12c93ac7003563e52fc56f881d74e22b8ed1fcc --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/nominal/__init__.py @@ -0,0 +1,20 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from torchmetrics.functional.nominal.cramers import cramers_v, cramers_v_matrix # noqa: F401 +from torchmetrics.functional.nominal.pearson import ( # noqa: F401 + pearsons_contingency_coefficient, + pearsons_contingency_coefficient_matrix, +) +from torchmetrics.functional.nominal.theils_u import theils_u, theils_u_matrix # noqa: F401 +from torchmetrics.functional.nominal.tschuprows import tschuprows_t, tschuprows_t_matrix # noqa: F401 diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/concordance.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/concordance.py new file mode 100644 index 0000000000000000000000000000000000000000..37cff5edcd66496652a17489c29286b2489b2d6a --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/concordance.py @@ -0,0 +1,68 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
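+
+# Implementation note: the concordance correlation coefficient below reuses the
+# running statistics (means, variances and covariance) accumulated by
+# ``_pearson_corrcoef_update`` and differs from Pearson's r only in the final
+# normalisation, which additionally penalises location and scale differences
+# between ``preds`` and ``target``.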
+import torch
+from torch import Tensor
+
+from torchmetrics.functional.regression.pearson import _pearson_corrcoef_compute, _pearson_corrcoef_update
+
+
+def _concordance_corrcoef_compute(
+    mean_x: Tensor,
+    mean_y: Tensor,
+    var_x: Tensor,
+    var_y: Tensor,
+    corr_xy: Tensor,
+    nb: Tensor,
+) -> Tensor:
+    """Computes the final concordance correlation coefficient based on accumulated statistics."""
+    pearson = _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb)
+    return 2.0 * pearson * var_x.sqrt() * var_y.sqrt() / (var_x + var_y + (mean_x - mean_y) ** 2)
+
+
+def concordance_corrcoef(preds: Tensor, target: Tensor) -> Tensor:
+    r"""Computes the concordance correlation coefficient that measures the agreement between two variables.
+
+    It is defined as
+
+    .. math::
+        \rho_c = \frac{2 \rho \sigma_x \sigma_y}{\sigma_x^2 + \sigma_y^2 + (\mu_x - \mu_y)^2}
+
+    where :math:`\mu_x, \mu_y` are the means of the two variables, :math:`\sigma_x^2, \sigma_y^2` are the
+    corresponding variances, and :math:`\rho` is the Pearson correlation coefficient between the two variables.
+
+    Args:
+        preds: estimated scores
+        target: ground truth scores
+
+    Example (single output regression):
+        >>> from torchmetrics.functional import concordance_corrcoef
+        >>> target = torch.tensor([3, -0.5, 2, 7])
+        >>> preds = torch.tensor([2.5, 0.0, 2, 8])
+        >>> concordance_corrcoef(preds, target)
+        tensor([0.9777])
+
+    Example (multi output regression):
+        >>> from torchmetrics.functional import concordance_corrcoef
+        >>> target = torch.tensor([[3, -0.5], [2, 7]])
+        >>> preds = torch.tensor([[2.5, 0.0], [2, 8]])
+        >>> concordance_corrcoef(preds, target)
+        tensor([0.7273, 0.9887])
+    """
+    d = preds.shape[1] if preds.ndim == 2 else 1
+    _temp = torch.zeros(d, dtype=preds.dtype, device=preds.device)
+    mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone()
+    var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone()
+    mean_x, mean_y, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(
+        preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb, num_outputs=1 if preds.ndim == 1 else preds.shape[-1]
+    )
+    return _concordance_corrcoef_compute(mean_x, mean_y, var_x, var_y, corr_xy, nb)
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/explained_variance.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/explained_variance.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f3c8dd618e8f6252a3ab7d0db2058bf8a9ca617
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/explained_variance.py
@@ -0,0 +1,137 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Sequence, Tuple, Union
+
+import torch
+from torch import Tensor
+
+from torchmetrics.utilities.checks import _check_same_shape
+
+
+def _explained_variance_update(preds: Tensor, target: Tensor) -> Tuple[int, Tensor, Tensor, Tensor, Tensor]:
+    """Updates and returns variables required to compute Explained Variance.
Checks for same shape of input + tensors. + + Args: + preds: Predicted tensor + target: Ground truth tensor + """ + + _check_same_shape(preds, target) + + n_obs = preds.size(0) + sum_error = torch.sum(target - preds, dim=0) + diff = target - preds + sum_squared_error = torch.sum(diff * diff, dim=0) + + sum_target = torch.sum(target, dim=0) + sum_squared_target = torch.sum(target * target, dim=0) + + return n_obs, sum_error, sum_squared_error, sum_target, sum_squared_target + + +def _explained_variance_compute( + n_obs: Tensor, + sum_error: Tensor, + sum_squared_error: Tensor, + sum_target: Tensor, + sum_squared_target: Tensor, + multioutput: str = "uniform_average", +) -> Tensor: + """Computes Explained Variance. + + Args: + n_obs: Number of predictions or observations + sum_error: Sum of errors over all observations + sum_squared_error: Sum of square of errors over all observations + sum_target: Sum of target values + sum_squared_target: Sum of squares of target values + multioutput: Defines aggregation in the case of multiple output scores. Can be one + of the following strings: + + * ``'raw_values'`` returns full set of scores + * ``'uniform_average'`` scores are uniformly averaged + * ``'variance_weighted'`` scores are weighted by their individual variances + + Example: + >>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]]) + >>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]]) + >>> n_obs, sum_error, ss_error, sum_target, ss_target = _explained_variance_update(preds, target) + >>> _explained_variance_compute(n_obs, sum_error, ss_error, sum_target, ss_target, multioutput='raw_values') + tensor([0.9677, 1.0000]) + """ + + diff_avg = sum_error / n_obs + numerator = sum_squared_error / n_obs - (diff_avg * diff_avg) + + target_avg = sum_target / n_obs + denominator = sum_squared_target / n_obs - (target_avg * target_avg) + + # Take care of division by zero + nonzero_numerator = numerator != 0 + nonzero_denominator = denominator != 0 + valid_score = nonzero_numerator & nonzero_denominator + output_scores = torch.ones_like(diff_avg) + output_scores[valid_score] = 1.0 - (numerator[valid_score] / denominator[valid_score]) + output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0 + + # Decide what to do in multioutput case + # Todo: allow user to pass in tensor with weights + if multioutput == "raw_values": + return output_scores + if multioutput == "uniform_average": + return torch.mean(output_scores) + if multioutput == "variance_weighted": + denom_sum = torch.sum(denominator) + return torch.sum(denominator / denom_sum * output_scores) + + +def explained_variance( + preds: Tensor, + target: Tensor, + multioutput: str = "uniform_average", +) -> Union[Tensor, Sequence[Tensor]]: + """Computes explained variance. + + Args: + preds: estimated labels + target: ground truth labels + multioutput: Defines aggregation in the case of multiple output scores. 
+            Can be one of the following strings:
+
+            * ``'raw_values'`` returns full set of scores
+            * ``'uniform_average'`` scores are uniformly averaged
+            * ``'variance_weighted'`` scores are weighted by their individual variances
+
+    Example:
+        >>> from torchmetrics.functional import explained_variance
+        >>> target = torch.tensor([3, -0.5, 2, 7])
+        >>> preds = torch.tensor([2.5, 0.0, 2, 8])
+        >>> explained_variance(preds, target)
+        tensor(0.9572)
+
+        >>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]])
+        >>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]])
+        >>> explained_variance(preds, target, multioutput='raw_values')
+        tensor([0.9677, 1.0000])
+    """
+    n_obs, sum_error, sum_squared_error, sum_target, sum_squared_target = _explained_variance_update(preds, target)
+    return _explained_variance_compute(
+        n_obs,
+        sum_error,
+        sum_squared_error,
+        sum_target,
+        sum_squared_target,
+        multioutput,
+    )
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/log_mse.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/log_mse.py
new file mode 100644
index 0000000000000000000000000000000000000000..3fac49e514b8f1401cfe785ac2b386d8f511a282
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/log_mse.py
@@ -0,0 +1,76 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import torch
+from torch import Tensor
+
+from torchmetrics.utilities.checks import _check_same_shape
+
+
+def _mean_squared_log_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
+    """Returns variables required to compute Mean Squared Log Error. Checks for same shape of tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+    """
+    _check_same_shape(preds, target)
+    sum_squared_log_error = torch.sum(torch.pow(torch.log1p(preds) - torch.log1p(target), 2))
+    n_obs = target.numel()
+    return sum_squared_log_error, n_obs
+
+
+def _mean_squared_log_error_compute(sum_squared_log_error: Tensor, n_obs: int) -> Tensor:
+    """Computes Mean Squared Log Error.
+
+    Args:
+        sum_squared_log_error:
+            Sum of square of log errors over all observations ``(log error = log(target) - log(prediction))``
+        n_obs: Number of predictions or observations
+
+    Example:
+        >>> preds = torch.tensor([0., 1, 2, 3])
+        >>> target = torch.tensor([0., 1, 2, 2])
+        >>> sum_squared_log_error, n_obs = _mean_squared_log_error_update(preds, target)
+        >>> _mean_squared_log_error_compute(sum_squared_log_error, n_obs)
+        tensor(0.0207)
+    """
+    return sum_squared_log_error / n_obs
+
+
+def mean_squared_log_error(preds: Tensor, target: Tensor) -> Tensor:
+    """Computes mean squared log error.
+
+    Args:
+        preds: estimated labels
+        target: ground truth labels
+
+    Return:
+        Tensor with MSLE
+
+    Example:
+        >>> from torchmetrics.functional import mean_squared_log_error
+        >>> x = torch.tensor([0., 1, 2, 3])
+        >>> y = torch.tensor([0., 1, 2, 2])
+        >>> mean_squared_log_error(x, y)
+        tensor(0.0207)
+
+    .. note::
+        Half precision is only supported on GPU for this metric
+    """
+    sum_squared_log_error, n_obs = _mean_squared_log_error_update(preds, target)
+    return _mean_squared_log_error_compute(sum_squared_log_error, n_obs)
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/r2.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/r2.py
new file mode 100644
index 0000000000000000000000000000000000000000..db0e3b14a702abf8ad9e3881f06a8d668f4bfb0e
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/r2.py
@@ -0,0 +1,174 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import torch
+from torch import Tensor
+
+from torchmetrics.utilities import rank_zero_warn
+from torchmetrics.utilities.checks import _check_same_shape
+
+
+def _r2_score_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+    """Updates and returns variables required to compute R2 score.
+
+    Checks for same shape and 1D/2D input tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+    """
+    _check_same_shape(preds, target)
+    if preds.ndim > 2:
+        raise ValueError(
+            "Expected both prediction and target to be 1D or 2D tensors,"
+            f" but received tensors with dimension {preds.shape}"
+        )
+
+    sum_obs = torch.sum(target, dim=0)
+    sum_squared_obs = torch.sum(target * target, dim=0)
+    residual = target - preds
+    rss = torch.sum(residual * residual, dim=0)
+    n_obs = target.size(0)
+    return sum_squared_obs, sum_obs, rss, n_obs
+
+
+def _r2_score_compute(
+    sum_squared_obs: Tensor,
+    sum_obs: Tensor,
+    rss: Tensor,
+    n_obs: Tensor,
+    adjusted: int = 0,
+    multioutput: str = "uniform_average",
+) -> Tensor:
+    """Computes R2 score.
+
+    Args:
+        sum_squared_obs: Sum of square of all observations
+        sum_obs: Sum of all observations
+        rss: Residual sum of squares
+        n_obs: Number of predictions or observations
+        adjusted: number of independent regressors for calculating adjusted r2 score.
+        multioutput: Defines aggregation in the case of multiple output scores. Can be one of the following strings:
+
+            * `'raw_values'` returns full set of scores
+            * `'uniform_average'` scores are uniformly averaged
+            * `'variance_weighted'` scores are weighted by their individual variances
+
+    Example:
+        >>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]])
+        >>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]])
+        >>> sum_squared_obs, sum_obs, rss, n_obs = _r2_score_update(preds, target)
+        >>> _r2_score_compute(sum_squared_obs, sum_obs, rss, n_obs, multioutput="raw_values")
+        tensor([0.9654, 0.9082])
+    """
+    if n_obs < 2:
+        raise ValueError("Needs at least two samples to calculate r2 score.")
+
+    mean_obs = sum_obs / n_obs
+    tss = sum_squared_obs - sum_obs * mean_obs
+
+    # Account for near constant targets
+    cond_rss = ~torch.isclose(rss, torch.zeros_like(rss), atol=1e-4)
+    cond_tss = ~torch.isclose(tss, torch.zeros_like(tss), atol=1e-4)
+    cond = cond_rss & cond_tss
+
+    raw_scores = torch.ones_like(rss)
+    raw_scores[cond] = 1 - (rss[cond] / tss[cond])
+    raw_scores[cond_rss & ~cond_tss] = 0.0
+
+    if multioutput == "raw_values":
+        r2 = raw_scores
+    elif multioutput == "uniform_average":
+        r2 = torch.mean(raw_scores)
+    elif multioutput == "variance_weighted":
+        tss_sum = torch.sum(tss)
+        r2 = torch.sum(tss / tss_sum * raw_scores)
+    else:
+        raise ValueError(
+            "Argument `multioutput` must be either `raw_values`,"
+            f" `uniform_average` or `variance_weighted`. Received {multioutput}."
+        )
+
+    if adjusted < 0 or not isinstance(adjusted, int):
+        raise ValueError("`adjusted` parameter should be an integer greater than or equal to 0.")
+
+    if adjusted != 0:
+        if adjusted > n_obs - 1:
+            rank_zero_warn(
+                "More independent regressors than data points in"
+                " adjusted r2 score. Falls back to standard r2 score.",
+                UserWarning,
+            )
+        elif adjusted == n_obs - 1:
+            rank_zero_warn("Division by zero in adjusted r2 score. Falls back to standard r2 score.", UserWarning)
+        else:
+            r2 = 1 - (1 - r2) * (n_obs - 1) / (n_obs - adjusted - 1)
+    return r2
+
+
+def r2_score(
+    preds: Tensor,
+    target: Tensor,
+    adjusted: int = 0,
+    multioutput: str = "uniform_average",
+) -> Tensor:
+    r"""Computes r2 score also known as `R2 Score_Coefficient Determination`_:
+
+    .. math:: R^2 = 1 - \frac{SS_{res}}{SS_{tot}}
+
+    where :math:`SS_{res}=\sum_i (y_i - f(x_i))^2` is the sum of residual squares, and
+    :math:`SS_{tot}=\sum_i (y_i - \bar{y})^2` is the total sum of squares. Can also calculate the
+    adjusted r2 score, given by
+
+    .. math:: R^2_{adj} = 1 - \frac{(1-R^2)(n-1)}{n-k-1}
+
+    where the parameter :math:`k` (the number of independent regressors) should
+    be provided as the ``adjusted`` argument.
+
+    Args:
+        preds: estimated labels
+        target: ground truth labels
+        adjusted: number of independent regressors for calculating adjusted r2 score.
+        multioutput: Defines aggregation in the case of multiple output scores. Can be one of the following strings:
+
+            * ``'raw_values'`` returns full set of scores
+            * ``'uniform_average'`` scores are uniformly averaged
+            * ``'variance_weighted'`` scores are weighted by their individual variances
+
+    Raises:
+        ValueError:
+            If both ``preds`` and ``targets`` are not ``1D`` or ``2D`` tensors.
+        ValueError:
+            If ``len(preds)`` is less than ``2`` since at least ``2`` samples are needed to calculate r2 score.
+        ValueError:
+            If ``multioutput`` is not one of ``raw_values``, ``uniform_average`` or ``variance_weighted``.
+        ValueError:
+            If ``adjusted`` is not an ``integer`` greater than or equal to ``0``.
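+
+    Example (adjusted score; an illustrative sketch assuming a single independent regressor, ``adjusted=1``):
+        >>> from torchmetrics.functional import r2_score
+        >>> target = torch.tensor([3, -0.5, 2, 7])
+        >>> preds = torch.tensor([2.5, 0.0, 2, 8])
+        >>> r2_score(preds, target, adjusted=1)
+        tensor(0.9229)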
+
+    Example:
+        >>> from torchmetrics.functional import r2_score
+        >>> target = torch.tensor([3, -0.5, 2, 7])
+        >>> preds = torch.tensor([2.5, 0.0, 2, 8])
+        >>> r2_score(preds, target)
+        tensor(0.9486)
+
+        >>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]])
+        >>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]])
+        >>> r2_score(preds, target, multioutput='raw_values')
+        tensor([0.9654, 0.9082])
+    """
+    sum_squared_obs, sum_obs, rss, n_obs = _r2_score_update(preds, target)
+    return _r2_score_compute(sum_squared_obs, sum_obs, rss, n_obs, adjusted, multioutput)
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/symmetric_mape.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/symmetric_mape.py
new file mode 100644
index 0000000000000000000000000000000000000000..660ee5d0c22f04f004ef3dbd84ada6e5060da61c
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/symmetric_mape.py
@@ -0,0 +1,98 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import torch
+from torch import Tensor
+
+from torchmetrics.utilities.checks import _check_same_shape
+
+
+def _symmetric_mean_absolute_percentage_error_update(
+    preds: Tensor,
+    target: Tensor,
+    epsilon: float = 1.17e-06,
+) -> Tuple[Tensor, int]:
+    """Updates and returns variables required to compute Symmetric Mean Absolute Percentage Error.
+
+    Checks for same shape of input tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+        epsilon: Avoids ``ZeroDivisionError``.
+    """
+    _check_same_shape(preds, target)
+
+    abs_diff = torch.abs(preds - target)
+    abs_per_error = abs_diff / torch.clamp(torch.abs(target) + torch.abs(preds), min=epsilon)
+
+    sum_abs_per_error = 2 * torch.sum(abs_per_error)
+
+    num_obs = target.numel()
+
+    return sum_abs_per_error, num_obs
+
+
+def _symmetric_mean_absolute_percentage_error_compute(sum_abs_per_error: Tensor, num_obs: int) -> Tensor:
+    """Computes Symmetric Mean Absolute Percentage Error.
+
+    Args:
+        sum_abs_per_error: Sum of values of symmetric absolute percentage errors over all observations
+            ``(symmetric absolute percentage error = 2 * |target - prediction| / (|target| + |prediction|))``
+        num_obs: Number of predictions or observations
+
+    Example:
+        >>> target = torch.tensor([1, 10, 1e6])
+        >>> preds = torch.tensor([0.9, 15, 1.2e6])
+        >>> sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update(preds, target)
+        >>> _symmetric_mean_absolute_percentage_error_compute(sum_abs_per_error, num_obs)
+        tensor(0.2290)
+    """
+    return sum_abs_per_error / num_obs
+
+
+def symmetric_mean_absolute_percentage_error(preds: Tensor, target: Tensor) -> Tensor:
+    r"""Computes symmetric mean absolute percentage error (SMAPE_):
+
+    .. math:: \text{SMAPE} = \frac{2}{n}\sum_{i=1}^n\frac{| y_i - \hat{y_i} |}{\max(| y_i | + | \hat{y_i} |, \epsilon)}
+
+    where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
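+
+    For a single pair, say :math:`y_i = 10` and :math:`\hat{y_i} = 15`, the summand is
+    :math:`2 \cdot |10 - 15| / (|10| + |15|) = 10 / 25 = 0.4`; since each summand is at most 2, SMAPE always
+    lies in :math:`[0, 2]`.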
+
+    Args:
+        preds: estimated labels
+        target: ground truth labels
+
+    Return:
+        Tensor with SMAPE.
+
+    Example:
+        >>> from torchmetrics.functional import symmetric_mean_absolute_percentage_error
+        >>> target = torch.tensor([1, 10, 1e6])
+        >>> preds = torch.tensor([0.9, 15, 1.2e6])
+        >>> symmetric_mean_absolute_percentage_error(preds, target)
+        tensor(0.2290)
+    """
+    sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update(
+        preds,
+        target,
+    )
+    mean_ape = _symmetric_mean_absolute_percentage_error_compute(
+        sum_abs_per_error,
+        num_obs,
+    )
+
+    return mean_ape
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/tweedie_deviance.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/tweedie_deviance.py
new file mode 100644
index 0000000000000000000000000000000000000000..4806caf13528f938bbf289ee077904c86c47f649
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/tweedie_deviance.py
@@ -0,0 +1,140 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import torch
+from torch import Tensor
+
+from torchmetrics.utilities.checks import _check_same_shape
+from torchmetrics.utilities.compute import _safe_xlogy
+
+
+def _tweedie_deviance_score_update(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tuple[Tensor, Tensor]:
+    """Updates and returns variables required to compute Deviance Score for the given power.
+
+    Checks for same shape of input tensors.
+
+    Args:
+        preds: Predicted tensor
+        targets: Ground truth tensor
+        power: see :func:`tweedie_deviance_score`
+
+    Example:
+        >>> targets = torch.tensor([1.0, 2.0, 3.0, 4.0])
+        >>> preds = torch.tensor([4.0, 3.0, 2.0, 1.0])
+        >>> _tweedie_deviance_score_update(preds, targets, power=2)
+        (tensor(4.8333), tensor(4))
+    """
+    _check_same_shape(preds, targets)
+
+    zero_tensor = torch.zeros(preds.shape, device=preds.device)
+
+    if 0 < power < 1:
+        raise ValueError(f"Deviance Score is not defined for power={power}.")
+
+    if power == 0:
+        deviance_score = torch.pow(targets - preds, exponent=2)
+    elif power == 1:
+        # Poisson distribution
+        if torch.any(preds <= 0) or torch.any(targets < 0):
+            raise ValueError(
+                f"For power={power}, 'preds' has to be strictly positive and 'targets' cannot be negative."
+            )
+
+        deviance_score = 2 * (_safe_xlogy(targets, targets / preds) + preds - targets)
+    elif power == 2:
+        # Gamma distribution
+        if torch.any(preds <= 0) or torch.any(targets <= 0):
+            raise ValueError(f"For power={power}, both 'preds' and 'targets' have to be strictly positive.")
+
+        deviance_score = 2 * (torch.log(preds / targets) + (targets / preds) - 1)
+    else:
+        if power < 0:
+            if torch.any(preds <= 0):
+                raise ValueError(f"For power={power}, 'preds' has to be strictly positive.")
+        elif 1 < power < 2:
+            if torch.any(preds <= 0) or torch.any(targets < 0):
+                raise ValueError(
+                    f"For power={power}, 'preds' has to be strictly positive and 'targets' cannot be negative."
+                )
+        else:
+            if torch.any(preds <= 0) or torch.any(targets <= 0):
+                raise ValueError(f"For power={power}, both 'preds' and 'targets' have to be strictly positive.")
+
+        term_1 = torch.pow(torch.max(targets, zero_tensor), 2 - power) / ((1 - power) * (2 - power))
+        term_2 = targets * torch.pow(preds, 1 - power) / (1 - power)
+        term_3 = torch.pow(preds, 2 - power) / (2 - power)
+        deviance_score = 2 * (term_1 - term_2 + term_3)
+
+    sum_deviance_score = torch.sum(deviance_score)
+    num_observations = torch.tensor(torch.numel(deviance_score), device=preds.device)
+
+    return sum_deviance_score, num_observations
+
+
+def _tweedie_deviance_score_compute(sum_deviance_score: Tensor, num_observations: Tensor) -> Tensor:
+    """Computes Deviance Score.
+
+    Args:
+        sum_deviance_score: Sum of deviance scores accumulated until now.
+        num_observations: Number of observations encountered until now.
+
+    Example:
+        >>> targets = torch.tensor([1.0, 2.0, 3.0, 4.0])
+        >>> preds = torch.tensor([4.0, 3.0, 2.0, 1.0])
+        >>> sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, power=2)
+        >>> _tweedie_deviance_score_compute(sum_deviance_score, num_observations)
+        tensor(1.2083)
+    """
+    return sum_deviance_score / num_observations
+
+
+def tweedie_deviance_score(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tensor:
+    r"""Computes the `Tweedie Deviance Score`_ between targets and predictions:
+
+    .. math::
+        deviance\_score(\hat{y},y) =
+        \begin{cases}
+        (\hat{y} - y)^2, & \text{for }p=0\\
+        2 * (y * \log(\frac{y}{\hat{y}}) + \hat{y} - y), & \text{for }p=1\\
+        2 * (\log(\frac{\hat{y}}{y}) + \frac{y}{\hat{y}} - 1), & \text{for }p=2\\
+        2 * (\frac{(\max(y,0))^{2 - p}}{(1 - p)(2 - p)} - \frac{y(\hat{y})^{1 - p}}{1 - p} +
+        \frac{(\hat{y})^{2 - p}}{2 - p}), & \text{otherwise}
+        \end{cases}
+
+    where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions, and
+    :math:`p` is the `power`.
+
+    Args:
+        preds: Predicted tensor with shape ``(N,...)``
+        targets: Ground truth tensor with shape ``(N,...)``
+        power:
+            - `power < 0` : Extreme stable distribution. (Requires: preds > 0.)
+            - `power = 0` : Normal distribution. (Requires: targets and preds can be any real numbers.)
+            - `power = 1` : Poisson distribution. (Requires: targets >= 0 and preds > 0.)
+            - `1 < power < 2` : Compound Poisson distribution. (Requires: targets >= 0 and preds > 0.)
+            - `power = 2` : Gamma distribution. (Requires: targets > 0 and preds > 0.)
+            - `power = 3` : Inverse Gaussian distribution. (Requires: targets > 0 and preds > 0.)
+            - `otherwise` : Positive stable distribution. (Requires: targets > 0 and preds > 0.)
+
+    Example:
+        >>> from torchmetrics.functional import tweedie_deviance_score
+        >>> targets = torch.tensor([1.0, 2.0, 3.0, 4.0])
+        >>> preds = torch.tensor([4.0, 3.0, 2.0, 1.0])
+        >>> tweedie_deviance_score(preds, targets, power=2)
+        tensor(1.2083)
+    """
+    sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, power=power)
+    return _tweedie_deviance_score_compute(sum_deviance_score, num_observations)
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/utils.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..71a7f253bde81347e36784ead6dde4d0d5a78d8f
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/utils.py
@@ -0,0 +1,28 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from torch import Tensor
+
+
+def _check_data_shape_to_num_outputs(preds: Tensor, target: Tensor, num_outputs: int) -> None:
+    """Check that predictions and target have the correct shape, else raise error."""
+    if preds.ndim > 2 or target.ndim > 2:
+        raise ValueError(
+            "Expected both predictions and target to be either 1- or 2-dimensional tensors,"
+            f" but got {preds.ndim} and {target.ndim}."
+        )
+    if (num_outputs == 1 and preds.ndim != 1) or (num_outputs > 1 and num_outputs != preds.shape[1]):
+        raise ValueError(
+            f"Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs}"
+            f" and {preds.shape[1]}."
+        )
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/wmape.py b/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/wmape.py
new file mode 100644
index 0000000000000000000000000000000000000000..06754babcccee33276267fb49f2f618bf2da85fa
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/wmape.py
@@ -0,0 +1,92 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+import torch
+from torch import Tensor
+
+from torchmetrics.utilities.checks import _check_same_shape
+
+
+def _weighted_mean_absolute_percentage_error_update(
+    preds: Tensor,
+    target: Tensor,
+) -> Tuple[Tensor, int]:
+    """Updates and returns variables required to compute Weighted Absolute Percentage Error.
+
+    Checks for same shape of input tensors.
+
+    Args:
+        preds: Predicted tensor
+        target: Ground truth tensor
+    """
+    _check_same_shape(preds, target)
+
+    sum_abs_error = (preds - target).abs().sum()
+    sum_scale = target.abs().sum()
+
+    return sum_abs_error, sum_scale
+
+
+def _weighted_mean_absolute_percentage_error_compute(
+    sum_abs_error: Tensor,
+    sum_scale: Tensor,
+    epsilon: float = 1.17e-06,
+) -> Tensor:
+    """Computes Weighted Absolute Percentage Error.
+
+    Args:
+        sum_abs_error: scalar with sum of absolute errors
+        sum_scale: scalar with sum of target values
+        epsilon: small float to prevent division by zero
+    """
+    return sum_abs_error / torch.clamp(sum_scale, min=epsilon)
+
+
+def weighted_mean_absolute_percentage_error(preds: Tensor, target: Tensor) -> Tensor:
+    r"""Computes weighted mean absolute percentage error (`WMAPE`_).
+
+    The output of the WMAPE metric is a non-negative floating point number, where the optimal value is 0.
+    It is computed as:
+
+    .. math::
+        \text{WMAPE} = \frac{\sum_{t=1}^n | y_t - \hat{y}_t | }{\sum_{t=1}^n |y_t| }
+
+    where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
+
+    Args:
+        preds: estimated labels
+        target: ground truth labels
+
+    Return:
+        Tensor with WMAPE.
+
+    Example:
+        >>> import torch
+        >>> _ = torch.manual_seed(42)
+        >>> preds = torch.randn(20,)
+        >>> target = torch.randn(20,)
+        >>> weighted_mean_absolute_percentage_error(preds, target)
+        tensor(1.3967)
+    """
+    sum_abs_error, sum_scale = _weighted_mean_absolute_percentage_error_update(
+        preds,
+        target,
+    )
+    weighted_ape = _weighted_mean_absolute_percentage_error_compute(
+        sum_abs_error,
+        sum_scale,
+    )
+
+    return weighted_ape
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__init__.py b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d26572f4a273da382c06cdbf2ff04a7e9c82112e
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__init__.py
@@ -0,0 +1,17 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from torchmetrics.nominal.cramers import CramersV  # noqa: F401
+from torchmetrics.nominal.pearson import PearsonsContingencyCoefficient  # noqa: F401
+from torchmetrics.nominal.theils_u import TheilsU  # noqa: F401
+from torchmetrics.nominal.tschuprows import TschuprowsT  # noqa: F401
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e3b03e55b769d0e60972f078d8b484cd2105b596
Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__pycache__/__init__.cpython-310.pyc differ
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__pycache__/cramers.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__pycache__/cramers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c51976707e3c8e200cd22e79c83785aac49b74a5
Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__pycache__/cramers.cpython-310.pyc differ
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__pycache__/pearson.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__pycache__/pearson.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c23cb00c793a44c5493f0e40a5c699fffd1289ff
Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__pycache__/pearson.cpython-310.pyc differ
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__pycache__/theils_u.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__pycache__/theils_u.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4b1e193fe2703665991cc565f402ba238bf47eeb
Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__pycache__/theils_u.cpython-310.pyc differ
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__pycache__/tschuprows.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__pycache__/tschuprows.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c27c6d04576e848b390d8e1bd4263703c7d50508
Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/__pycache__/tschuprows.cpython-310.pyc differ
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/nominal/cramers.py b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/cramers.py
new file mode 100644
index 0000000000000000000000000000000000000000..31b378fc5ca6aeddcdc56702b242d9cfc58ed2c0
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/cramers.py
@@ -0,0 +1,108 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Optional, Union
+
+import torch
+from torch import Tensor
+from typing_extensions import Literal
+
+from torchmetrics.functional.nominal.cramers import _cramers_v_compute, _cramers_v_update
+from torchmetrics.functional.nominal.utils import _nominal_input_validation
+from torchmetrics.metric import Metric
+
+
+class CramersV(Metric):
+    r"""Compute `Cramer's V`_ statistic measuring the association between two categorical (nominal) data series.
+
+    .. math::
+        V = \sqrt{\frac{\chi^2 / n}{\min(r - 1, k - 1)}}
+
+    where
+
+    .. math::
+        \chi^2 = \sum_{i,j} \frac{\left(n_{ij} - \frac{n_{i.} n_{.j}}{n}\right)^2}{\frac{n_{i.} n_{.j}}{n}}
+
+    where :math:`n_{ij}` denotes the number of times the pair of values :math:`(A_i, B_j)` is observed, with
+    :math:`A_i` and :math:`B_j` representing the values of ``preds`` and ``target``, respectively.
+
+    Cramer's V is a symmetric coefficient, i.e. :math:`V(preds, target) = V(target, preds)`.
+
+    The output value lies in [0, 1], with 1 meaning a perfect association.
+
+    Args:
+        num_classes: Integer specifying the number of classes
+        bias_correction: Indication of whether to use bias correction.
+        nan_strategy: Indication of whether to replace or drop ``NaN`` values
+        nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
+        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
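+
+    The ``confmat`` state is additive, so splitting the same data across several ``update`` calls gives exactly
+    the same value as a single call (an illustrative sketch, reusing the data from the example below):
+
+        >>> _ = torch.manual_seed(42)
+        >>> preds = torch.randint(0, 4, (100,))
+        >>> target = torch.round(preds + torch.randn(100)).clamp(0, 4)
+        >>> metric = CramersV(num_classes=5)
+        >>> metric.update(preds[:50], target[:50])
+        >>> metric.update(preds[50:], target[50:])
+        >>> metric.compute()
+        tensor(0.5284)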
+
+    Returns:
+        Cramer's V statistic
+
+    Raises:
+        ValueError:
+            If `nan_strategy` is not one of `'replace'` and `'drop'`
+        ValueError:
+            If `nan_strategy` is equal to `'replace'` and `nan_replace_value` is not an `int` or `float`
+
+    Example:
+        >>> from torchmetrics import CramersV
+        >>> _ = torch.manual_seed(42)
+        >>> preds = torch.randint(0, 4, (100,))
+        >>> target = torch.round(preds + torch.randn(100)).clamp(0, 4)
+        >>> cramers_v = CramersV(num_classes=5)
+        >>> cramers_v(preds, target)
+        tensor(0.5284)
+    """
+
+    full_state_update: bool = False
+    is_differentiable: bool = False
+    higher_is_better: bool = True
+    confmat: Tensor
+
+    def __init__(
+        self,
+        num_classes: int,
+        bias_correction: bool = True,
+        nan_strategy: Literal["replace", "drop"] = "replace",
+        nan_replace_value: Optional[Union[int, float]] = 0.0,
+        **kwargs: Any,
+    ):
+        super().__init__(**kwargs)
+        self.num_classes = num_classes
+        self.bias_correction = bias_correction
+
+        _nominal_input_validation(nan_strategy, nan_replace_value)
+        self.nan_strategy = nan_strategy
+        self.nan_replace_value = nan_replace_value
+
+        self.add_state("confmat", torch.zeros(num_classes, num_classes), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:
+        """Update state with predictions and targets.
+
+        Args:
+            preds: 1D or 2D tensor of categorical (nominal) data
+                - 1D shape: (batch_size,)
+                - 2D shape: (batch_size, num_classes)
+            target: 1D or 2D tensor of categorical (nominal) data
+                - 1D shape: (batch_size,)
+                - 2D shape: (batch_size, num_classes)
+        """
+        confmat = _cramers_v_update(preds, target, self.num_classes, self.nan_strategy, self.nan_replace_value)
+        self.confmat += confmat
+
+    def compute(self) -> Tensor:
+        """Compute Cramer's V statistic."""
+        return _cramers_v_compute(self.confmat, self.bias_correction)
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/nominal/theils_u.py b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/theils_u.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d26bdba8b33c9dcaea9a9f105d10966ca8e9a91
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/theils_u.py
@@ -0,0 +1,95 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Optional, Union
+
+import torch
+from torch import Tensor
+from typing_extensions import Literal
+
+from torchmetrics.functional.nominal.theils_u import _theils_u_compute, _theils_u_update
+from torchmetrics.functional.nominal.utils import _nominal_input_validation
+from torchmetrics.metric import Metric
+
+
+class TheilsU(Metric):
+    r"""Compute `Theil's U`_ statistic (Uncertainty Coefficient) measuring the association between two categorical
+    (nominal) data series.
+
+    .. math::
+        U(X|Y) = \frac{H(X) - H(X|Y)}{H(X)}
+
+    where :math:`H(X)` is the entropy of variable :math:`X`, while :math:`H(X|Y)` is the conditional entropy of
+    :math:`X` given :math:`Y`.
+
+    Theil's U is an asymmetric coefficient, i.e. :math:`TheilsU(preds, target) \neq TheilsU(target, preds)`.
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/nominal/theils_u.py b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/theils_u.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d26bdba8b33c9dcaea9a9f105d10966ca8e9a91
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/theils_u.py
@@ -0,0 +1,95 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Optional, Union
+
+import torch
+from torch import Tensor
+from typing_extensions import Literal
+
+from torchmetrics.functional.nominal.theils_u import _theils_u_compute, _theils_u_update
+from torchmetrics.functional.nominal.utils import _nominal_input_validation
+from torchmetrics.metric import Metric
+
+
+class TheilsU(Metric):
+    r"""Compute `Theil's U`_ statistic (Uncertainty Coefficient) measuring the association between two categorical
+    (nominal) data series.
+
+    .. math::
+        U(X|Y) = \frac{H(X) - H(X|Y)}{H(X)}
+
+    where :math:`H(X)` is the entropy of variable :math:`X` and :math:`H(X|Y)` is the conditional entropy of
+    :math:`X` given :math:`Y`.
+
+    Theil's U is an asymmetric coefficient, i.e. :math:`TheilsU(preds, target) \neq TheilsU(target, preds)`.
+
+    The output values lie in :math:`[0, 1]`, where 0 means that :math:`Y` provides no information about :math:`X`
+    and 1 means that :math:`Y` provides complete information about :math:`X`.
+
+    Args:
+        num_classes: Integer specifying the number of classes
+        nan_strategy: Indication of whether to replace or drop ``NaN`` values
+        nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
+        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
+
+    Returns:
+        Theil's U statistic
+
+    Example:
+        >>> from torchmetrics import TheilsU
+        >>> _ = torch.manual_seed(42)
+        >>> preds = torch.randint(10, (10,))
+        >>> target = torch.randint(10, (10,))
+        >>> TheilsU(num_classes=10)(preds, target)
+        tensor(0.8530)
+    """
+
+    full_state_update: bool = False
+    is_differentiable: bool = False
+    higher_is_better: bool = True
+    confmat: Tensor
+
+    def __init__(
+        self,
+        num_classes: int,
+        nan_strategy: Literal["replace", "drop"] = "replace",
+        nan_replace_value: Optional[Union[int, float]] = 0.0,
+        **kwargs: Any,
+    ):
+        super().__init__(**kwargs)
+        self.num_classes = num_classes
+
+        _nominal_input_validation(nan_strategy, nan_replace_value)
+        self.nan_strategy = nan_strategy
+        self.nan_replace_value = nan_replace_value
+
+        self.add_state("confmat", torch.zeros(num_classes, num_classes), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:
+        """Update state with predictions and targets.
+
+        Args:
+            preds: 1D or 2D tensor of categorical (nominal) data
+                - 1D shape: (batch_size,)
+                - 2D shape: (batch_size, num_classes)
+            target: 1D or 2D tensor of categorical (nominal) data
+                - 1D shape: (batch_size,)
+                - 2D shape: (batch_size, num_classes)
+        """
+        confmat = _theils_u_update(preds, target, self.num_classes, self.nan_strategy, self.nan_replace_value)
+        self.confmat += confmat
+
+    def compute(self) -> Tensor:
+        """Compute Theil's U statistic."""
+        return _theils_u_compute(self.confmat)
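[Editor's note] The entropy-based definition can likewise be written out directly. The sketch below is illustrative only: the helper name is hypothetical, and torchmetrics' own ``_theils_u_compute`` may differ in details such as NaN handling. Rows index :math:`X` and columns index :math:`Y`:

    import torch
    from torch import Tensor

    def theils_u_from_confmat(confmat: Tensor) -> Tensor:
        """Hypothetical helper: U(X|Y) from a joint-count matrix (rows: X, columns: Y)."""
        p_xy = confmat / confmat.sum()   # joint distribution p(x, y)
        p_x = p_xy.sum(dim=1)            # marginal p(x)
        p_y = p_xy.sum(dim=0)            # marginal p(y)
        h_x = -torch.sum(p_x * torch.log(p_x.clamp_min(1e-12)))
        # H(X|Y) = -sum_{x,y} p(x, y) * log(p(x, y) / p(y)); nan_to_num guards empty columns
        h_x_given_y = -torch.sum(torch.nan_to_num(p_xy * torch.log((p_xy / p_y).clamp_min(1e-12))))
        return (h_x - h_x_given_y) / h_x

    # A diagonal table means Y determines X completely, so U = 1:
    print(theils_u_from_confmat(torch.tensor([[10.0, 0.0], [0.0, 20.0]])))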
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/nominal/tschuprows.py b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/tschuprows.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a0327fb348d18720478bbdd14ff38fe493b50f5
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/nominal/tschuprows.py
@@ -0,0 +1,111 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Optional, Union
+
+import torch
+from torch import Tensor
+from typing_extensions import Literal
+
+from torchmetrics.functional.nominal.tschuprows import _tschuprows_t_compute, _tschuprows_t_update
+from torchmetrics.functional.nominal.utils import _nominal_input_validation
+from torchmetrics.metric import Metric
+
+
+class TschuprowsT(Metric):
+    r"""Compute `Tschuprow's T`_ statistic measuring the association between two categorical (nominal) data series.
+
+    .. math::
+        T = \sqrt{\frac{\chi^2 / n}{\sqrt{(r - 1)(k - 1)}}}
+
+    where
+
+    .. math::
+        \chi^2 = \sum_{i,j} \frac{\left(n_{ij} - \frac{n_{i.} n_{.j}}{n}\right)^2}{\frac{n_{i.} n_{.j}}{n}}
+
+    where :math:`n_{ij}` denotes the number of times the value pair :math:`(A_i, B_j)` is observed, and
+    :math:`A_i`, :math:`B_j` denote the values occurring in ``preds`` and ``target``, respectively.
+
+    Tschuprow's T is a symmetric coefficient, i.e. :math:`T(preds, target) = T(target, preds)`.
+
+    The output values lie in :math:`[0, 1]`, with 1 meaning a perfect association.
+
+    Args:
+        num_classes: Integer specifying the number of classes
+        bias_correction: Indication of whether to use bias correction.
+        nan_strategy: Indication of whether to replace or drop ``NaN`` values
+        nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
+        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
+
+    Returns:
+        Tschuprow's T statistic
+
+    Raises:
+        ValueError:
+            If ``nan_strategy`` is not one of ``'replace'`` and ``'drop'``
+        ValueError:
+            If ``nan_strategy`` is equal to ``'replace'`` and ``nan_replace_value`` is not an ``int`` or ``float``
+
+    Example:
+        >>> from torchmetrics import TschuprowsT
+        >>> _ = torch.manual_seed(42)
+        >>> preds = torch.randint(0, 4, (100,))
+        >>> target = torch.round(preds + torch.randn(100)).clamp(0, 4)
+        >>> tschuprows_t = TschuprowsT(num_classes=5)
+        >>> tschuprows_t(preds, target)
+        tensor(0.4930)
+    """
+
+    full_state_update: bool = False
+    is_differentiable: bool = False
+    higher_is_better: bool = True
+    confmat: Tensor
+
+    def __init__(
+        self,
+        num_classes: int,
+        bias_correction: bool = True,
+        nan_strategy: Literal["replace", "drop"] = "replace",
+        nan_replace_value: Optional[Union[int, float]] = 0.0,
+        **kwargs: Any,
+    ):
+        super().__init__(**kwargs)
+        self.num_classes = num_classes
+        self.bias_correction = bias_correction
+
+        _nominal_input_validation(nan_strategy, nan_replace_value)
+        self.nan_strategy = nan_strategy
+        self.nan_replace_value = nan_replace_value
+
+        self.add_state("confmat", torch.zeros(num_classes, num_classes), dist_reduce_fx="sum")
+
+    def update(self, preds: Tensor, target: Tensor) -> None:
+        """Update state with predictions and targets.
+
+        Args:
+            preds: 1D or 2D tensor of categorical (nominal) data:
+
+                - 1D shape: (batch_size,)
+                - 2D shape: (batch_size, num_classes)
+
+            target: 1D or 2D tensor of categorical (nominal) data:
+
+                - 1D shape: (batch_size,)
+                - 2D shape: (batch_size, num_classes)
+        """
+        confmat = _tschuprows_t_update(preds, target, self.num_classes, self.nan_strategy, self.nan_replace_value)
+        self.confmat += confmat
+
+    def compute(self) -> Tensor:
+        """Compute Tschuprow's T statistic."""
+        return _tschuprows_t_compute(self.confmat, self.bias_correction)
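[Editor's note] Tschuprow's T normalises the same :math:`\chi^2` statistic as Cramer's V, only with a geometric-mean rather than a minimum-based correction for table size, so all three nominal metrics share the same accumulate-then-compute usage: ``update`` folds each batch into the ``confmat`` state (summed across processes via ``dist_reduce_fx="sum"``) and ``compute`` reduces it once at the end. A usage sketch, assuming a torchmetrics build that exposes the class at the package top level as the doctests above do:

    import torch
    from torchmetrics import TschuprowsT  # CramersV and TheilsU follow the same pattern

    metric = TschuprowsT(num_classes=5)
    for _ in range(3):                 # e.g. three dataloader batches
        preds = torch.randint(0, 5, (32,))
        target = torch.randint(0, 5, (32,))
        metric.update(preds, target)   # only the 5x5 confusion matrix is kept in state
    score = metric.compute()           # one reduction over the accumulated counts
    metric.reset()                     # clear state, e.g. between epochs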
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/py.typed b/wemm/lib/python3.10/site-packages/torchmetrics/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/text/__init__.py b/wemm/lib/python3.10/site-packages/torchmetrics/text/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..072ca55d3fcb06c93008b5dd5be9f9b85c1d4256
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/text/__init__.py
@@ -0,0 +1,33 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from torchmetrics.text.bleu import BLEUScore  # noqa: F401
+from torchmetrics.text.cer import CharErrorRate  # noqa: F401
+from torchmetrics.text.chrf import CHRFScore  # noqa: F401
+from torchmetrics.text.eed import ExtendedEditDistance  # noqa: F401
+from torchmetrics.text.mer import MatchErrorRate  # noqa: F401
+from torchmetrics.text.perplexity import Perplexity  # noqa: F401
+from torchmetrics.text.sacre_bleu import SacreBLEUScore  # noqa: F401
+from torchmetrics.text.squad import SQuAD  # noqa: F401
+from torchmetrics.text.ter import TranslationEditRate  # noqa: F401
+from torchmetrics.text.wer import WordErrorRate  # noqa: F401
+from torchmetrics.text.wil import WordInfoLost  # noqa: F401
+from torchmetrics.text.wip import WordInfoPreserved  # noqa: F401
+from torchmetrics.utilities.imports import _NLTK_AVAILABLE, _TRANSFORMERS_AVAILABLE
+
+if _TRANSFORMERS_AVAILABLE:
+    from torchmetrics.text.bert import BERTScore  # noqa: F401
+    from torchmetrics.text.infolm import InfoLM  # noqa: F401
+
+if _NLTK_AVAILABLE:
+    from torchmetrics.text.rouge import ROUGEScore  # noqa: F401
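[Editor's note] The guarded imports above keep the optional text metrics importable only when their extra dependencies are present, while the unconditional imports stay dependency-free. torchmetrics defines flags such as ``_NLTK_AVAILABLE`` in ``torchmetrics.utilities.imports``; a minimal sketch of how such availability flags are commonly built with only the standard library (the exact torchmetrics implementation may differ):

    from importlib.util import find_spec

    def _package_available(name: str) -> bool:
        """Return True if `name` can be imported in the current environment."""
        try:
            return find_spec(name) is not None
        except ModuleNotFoundError:  # raised when a parent package is missing
            return False

    _NLTK_AVAILABLE = _package_available("nltk")
    _TRANSFORMERS_AVAILABLE = _package_available("transformers")

Checking for the module spec instead of importing eagerly keeps package import cheap; the heavy import only happens inside the guarded branch.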
b/wemm/lib/python3.10/site-packages/torchmetrics/text/__pycache__/mer.cpython-310.pyc differ... [moved binary entries follow]
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/text/__pycache__/mer.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchmetrics/text/__pycache__/mer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8aaeb511f8189d3377da3054df12b21a399fc55
Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchmetrics/text/__pycache__/mer.cpython-310.pyc differ
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/text/__pycache__/perplexity.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchmetrics/text/__pycache__/perplexity.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7ae8f101f8eb0f76df9b578416d8828a7e1f36b7
Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchmetrics/text/__pycache__/perplexity.cpython-310.pyc differ
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/text/__pycache__/ter.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchmetrics/text/__pycache__/ter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2f68c0f5870331c03b3924181d2fcb5def3a7a66
Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchmetrics/text/__pycache__/ter.cpython-310.pyc differ
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/text/__pycache__/wer.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchmetrics/text/__pycache__/wer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a4e069f7a652c77968b15451b9ccf936dfc99307
Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchmetrics/text/__pycache__/wer.cpython-310.pyc differ
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/text/__pycache__/wil.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torchmetrics/text/__pycache__/wil.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e33929ca8b6410d49c413d061ad0d66dff5420e9
Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torchmetrics/text/__pycache__/wil.cpython-310.pyc differ
diff --git a/wemm/lib/python3.10/site-packages/torchmetrics/text/bert.py b/wemm/lib/python3.10/site-packages/torchmetrics/text/bert.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4056136bbba9f0eeea92fb5bc9e246d8d50a855
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/torchmetrics/text/bert.py
@@ -0,0 +1,239 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Dict, List, Optional, Union
+from warnings import warn
+
+import torch
+from torch import Tensor
+from torch.nn import Module
+
+from torchmetrics.functional.text.bert import bert_score
+from torchmetrics.functional.text.helper_embedding_metric import _preprocess_text
+from torchmetrics.metric import Metric
+from torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout
+from torchmetrics.utilities.imports import _TRANSFORMERS_AVAILABLE
+
+# Default model recommended in the original implementation.
+_DEFAULT_MODEL = "roberta-large"
+
+if _TRANSFORMERS_AVAILABLE:
+    from transformers import AutoModel, AutoTokenizer
+
+    def _download_model() -> None:
+        """Download the default model and tokenizer (a network-intensive operation)."""
+        AutoTokenizer.from_pretrained(_DEFAULT_MODEL)
+        AutoModel.from_pretrained(_DEFAULT_MODEL)
+
+    if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_model):
+        __doctest_skip__ = ["BERTScore"]
+else:
+    __doctest_skip__ = ["BERTScore"]
+
+
+def _get_input_dict(input_ids: List[Tensor], attention_mask: List[Tensor]) -> Dict[str, Tensor]:
+    """Create an input dictionary of ``input_ids`` and ``attention_mask`` for BERTScore calculation."""
+    output_dict = {"input_ids": torch.cat(input_ids), "attention_mask": torch.cat(attention_mask)}
+    return output_dict
+
+
+class BERTScore(Metric):
+    """`Bert_score Evaluating Text Generation`_ leverages the pre-trained contextual embeddings from BERT and
+    matches words in candidate and reference sentences by cosine similarity. It has been shown to correlate with
+    human judgment on sentence-level and system-level evaluation. Moreover, BERTScore computes precision, recall,
+    and F1 measure, which can be useful for evaluating different language generation tasks.
+
+    This implementation follows the original implementation from `BERT_score`_.
+
+    As input to ``forward`` and ``update`` the metric accepts the following input:
+
+    - ``preds`` (:class:`~List`): An iterable of predicted sentences
+    - ``target`` (:class:`~List`): An iterable of reference sentences
+
+    As output of ``forward`` and ``compute`` the metric returns the following output:
+
+    - ``score`` (:class:`~Dict`): A dictionary containing the keys ``precision``, ``recall`` and ``f1`` with
+      corresponding values
+
+    Args:
+        model_name_or_path: A name or a model path used to load a ``transformers`` pretrained model.
+        num_layers: The layer of representation to use.
+        all_layers:
+            An indication of whether representations from all of the model's layers should be used.
+            If ``all_layers=True``, the argument ``num_layers`` is ignored.
+        model: A user's own model. Must be an instance of :class:`torch.nn.Module`.
+        user_tokenizer:
+            A user's own tokenizer used with the own model. This must be an instance with the ``__call__`` method.
+            This method must take an iterable of sentences (`List[str]`) and must return a python dictionary
+            containing `"input_ids"` and `"attention_mask"` represented by :class:`~torch.Tensor`.
+            It is up to the user's model whether `"input_ids"` is a :class:`~torch.Tensor` of input ids or embedding
+            vectors. This tokenizer must prepend an equivalent of the ``[CLS]`` token and append an equivalent of
+            the ``[SEP]`` token, as the ``transformers`` tokenizers do.
+        user_forward_fn:
+            A user's own forward function used in combination with ``user_model``. This function must take
+            ``user_model`` and a python dictionary containing ``"input_ids"`` and ``"attention_mask"`` represented
+            by :class:`~torch.Tensor` as an input and return the model's output represented by a single
+            :class:`~torch.Tensor`.
+        verbose: An indication of whether a progress bar should be displayed during the embedding calculation.
+        idf: An indication of whether normalization using inverse document frequencies should be used.
+        device: A device to be used for calculation.
+        max_length: A maximum length of input sequences. Sequences longer than ``max_length`` are trimmed.
+        batch_size: A batch size used for model processing.
+        num_threads: The number of threads to use for the dataloader.
+        return_hash: An indication of whether the corresponding ``hash_code`` should be returned.
+        lang: A language of input sentences.
+        rescale_with_baseline:
+            An indication of whether bertscore should be rescaled with a pre-computed baseline.
+            When a pretrained model from ``transformers`` is used, the corresponding baseline is downloaded
+            from the original ``bert-score`` package from `BERT_score`_ if available.
+            In other cases, please specify a path to the baseline csv/tsv file, which must follow the formatting
+            of the files from `BERT_score`_.
+        baseline_path: A path to the user's own local csv/tsv file with the baseline scale.
+        baseline_url: A URL of the user's own csv/tsv file with the baseline scale.
+        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
+
+    Example:
+        >>> from torchmetrics.text.bert import BERTScore
+        >>> preds = ["hello there", "general kenobi"]
+        >>> target = ["hello there", "master kenobi"]
+        >>> bertscore = BERTScore()
+        >>> score = bertscore(preds, target)
+        >>> from pprint import pprint
+        >>> rounded_score = {k: [round(v, 3) for v in vv] for k, vv in score.items()}
+        >>> pprint(rounded_score)
+        {'f1': [1.0, 0.996], 'precision': [1.0, 0.996], 'recall': [1.0, 0.996]}
+    """
+
+    is_differentiable: bool = False
+    higher_is_better: bool = True
+    full_state_update: bool = False
+
+    preds_input_ids: List[Tensor]
+    preds_attention_mask: List[Tensor]
+    target_input_ids: List[Tensor]
+    target_attention_mask: List[Tensor]
+
+    def __init__(
+        self,
+        model_name_or_path: Optional[str] = None,
+        num_layers: Optional[int] = None,
+        all_layers: bool = False,
+        model: Optional[Module] = None,
+        user_tokenizer: Optional[Any] = None,
+        user_forward_fn: Optional[Callable[[Module, Dict[str, Tensor]], Tensor]] = None,
+        verbose: bool = False,
+        idf: bool = False,
+        device: Optional[Union[str, torch.device]] = None,
+        max_length: int = 512,
+        batch_size: int = 64,
+        num_threads: int = 4,
+        return_hash: bool = False,
+        lang: str = "en",
+        rescale_with_baseline: bool = False,
+        baseline_path: Optional[str] = None,
+        baseline_url: Optional[str] = None,
+        **kwargs: Any,
+    ):
+        super().__init__(**kwargs)
+        self.model_name_or_path = model_name_or_path or _DEFAULT_MODEL
+        self.num_layers = num_layers
+        self.all_layers = all_layers
+        self.model = model
+        self.user_forward_fn = user_forward_fn
+        self.verbose = verbose
+        self.idf = idf
+        self.embedding_device = device
+        self.max_length = max_length
+        self.batch_size = batch_size
+        self.num_threads = num_threads
+        self.return_hash = return_hash
+        self.lang = lang
+        self.rescale_with_baseline = rescale_with_baseline
+        self.baseline_path = baseline_path
+        self.baseline_url = baseline_url
+        self.preds: Dict[str, List[Tensor]] = {"input_ids": [], "attention_mask": []}
+        self.target: Dict[str, List[Tensor]] = {"input_ids": [], "attention_mask": []}
+
+        if user_tokenizer:
+            self.tokenizer = user_tokenizer
+            self.user_tokenizer = True
+        else:
+            if not _TRANSFORMERS_AVAILABLE:
+                raise ModuleNotFoundError(
+                    "`BERTScore` metric with default tokenizers requires the `transformers` package to be installed."
+                    " Either install with `pip install transformers>=4.0` or `pip install torchmetrics[text]`."
+                )
+            if model_name_or_path is None:
+                warn(
+                    "The argument `model_name_or_path` was not specified while it is required when the default"
+                    " `transformers` model is used."
+                    f" Falling back to the default recommended model - {_DEFAULT_MODEL!r}."
+                )
+            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path)
+            self.user_tokenizer = False
+
+        self.add_state("preds_input_ids", [], dist_reduce_fx="cat")
+        self.add_state("preds_attention_mask", [], dist_reduce_fx="cat")
+        self.add_state("target_input_ids", [], dist_reduce_fx="cat")
+        self.add_state("target_attention_mask", [], dist_reduce_fx="cat")
+
+    def update(self, preds: List[str], target: List[str]) -> None:
+        """Store predictions/references for computing BERT scores.
+
+        Sentences are stored in tokenized form so that the metric also works correctly in DDP mode.
+        """
+        preds_dict, _ = _preprocess_text(
+            preds,
+            self.tokenizer,
+            self.max_length,
+            truncation=False,
+            sort_according_length=False,
+            own_tokenizer=self.user_tokenizer,
+        )
+        target_dict, _ = _preprocess_text(
+            target,
+            self.tokenizer,
+            self.max_length,
+            truncation=False,
+            sort_according_length=False,
+            own_tokenizer=self.user_tokenizer,
+        )
+
+        self.preds_input_ids.append(preds_dict["input_ids"])
+        self.preds_attention_mask.append(preds_dict["attention_mask"])
+        self.target_input_ids.append(target_dict["input_ids"])
+        self.target_attention_mask.append(target_dict["attention_mask"])
+
+    def compute(self) -> Dict[str, Union[List[float], str]]:
+        """Calculate BERT scores."""
+        return bert_score(
+            preds=_get_input_dict(self.preds_input_ids, self.preds_attention_mask),
+            target=_get_input_dict(self.target_input_ids, self.target_attention_mask),
+            model_name_or_path=self.model_name_or_path,
+            num_layers=self.num_layers,
+            all_layers=self.all_layers,
+            model=self.model,
+            user_tokenizer=self.tokenizer if self.user_tokenizer else None,
+            user_forward_fn=self.user_forward_fn,
+            verbose=self.verbose,
+            idf=self.idf,
+            device=self.embedding_device,
+            max_length=self.max_length,
+            batch_size=self.batch_size,
+            num_threads=self.num_threads,
+            return_hash=self.return_hash,
+            lang=self.lang,
+            rescale_with_baseline=self.rescale_with_baseline,
+            baseline_path=self.baseline_path,
+            baseline_url=self.baseline_url,
+        )
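[Editor's note] The ``user_tokenizer``/``user_forward_fn`` contract documented in the Args section above is easiest to see in code. A minimal sketch under stated assumptions: the checkpoint name is arbitrary, the wrapper function is illustrative rather than part of torchmetrics, and it relies on a Hugging Face tokenizer satisfying the documented ``__call__`` contract:

    from typing import Dict

    import torch
    from torch import Tensor
    from torch.nn import Module
    from transformers import AutoModel, AutoTokenizer

    from torchmetrics.text.bert import BERTScore

    tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")  # any HF checkpoint
    model = AutoModel.from_pretrained("distilbert-base-uncased")

    def forward_fn(model: Module, batch: Dict[str, Tensor]) -> Tensor:
        # Per the Args section: take the model and a dict of input_ids/attention_mask,
        # and return a single tensor of contextual token embeddings.
        return model(batch["input_ids"], batch["attention_mask"]).last_hidden_state

    bertscore = BERTScore(model=model, user_tokenizer=tokenizer, user_forward_fn=forward_fn)
    score = bertscore(["hello there"], ["hello there"])  # dict with precision/recall/f1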
diff --git a/wemm/lib/python3.10/site-packages/wheel-0.45.1.dist-info/METADATA b/wemm/lib/python3.10/site-packages/wheel-0.45.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..26584224472f62ab2196f0a727a86e42ffb63d71
--- /dev/null
+++ b/wemm/lib/python3.10/site-packages/wheel-0.45.1.dist-info/METADATA
@@ -0,0 +1,66 @@
+Metadata-Version: 2.1
+Name: wheel
+Version: 0.45.1
+Summary: A built-package format for Python
+Keywords: wheel,packaging
+Author-email: Daniel Holth
+Maintainer-email: Alex Grönholm
+Requires-Python: >=3.8
+Description-Content-Type: text/x-rst
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: System :: Archiving :: Packaging
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Requires-Dist: pytest >= 6.0.0 ; extra == "test"
+Requires-Dist: setuptools >= 65 ; extra == "test"
+Project-URL: Changelog, https://wheel.readthedocs.io/en/stable/news.html
+Project-URL: Documentation, https://wheel.readthedocs.io/
+Project-URL: Issue Tracker, https://github.com/pypa/wheel/issues
+Project-URL: Source, https://github.com/pypa/wheel
+Provides-Extra: test
+
+wheel
+=====
+
+This is a command line tool for manipulating Python wheel files, as defined in
+`PEP 427`_. It contains the following functionality:
+
+* Convert ``.egg`` archives into ``.whl``
+* Unpack wheel archives
+* Repack wheel archives
+* Add or remove tags in existing wheel archives
+
+.. _PEP 427: https://www.python.org/dev/peps/pep-0427/
+
+Historical note
+---------------
+
+This project used to contain the implementation of the setuptools_ ``bdist_wheel``
+command, but as of setuptools v70.1, it no longer needs ``wheel`` installed for that to
+work. Thus, you should install this **only** if you intend to use the ``wheel`` command
+line tool!
+
+.. _setuptools: https://pypi.org/project/setuptools/
+
+Documentation
+-------------
+
+The documentation_ can be found on Read The Docs.
+
+.. _documentation: https://wheel.readthedocs.io/
+
+Code of Conduct
+---------------
+
+Everyone interacting in the wheel project's codebases, issue trackers, chat
+rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_.
+
+.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md