ZTWHHH committed on
Commit
0164bd2
·
verified ·
1 Parent(s): fce80f4

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. wemm/lib/python3.10/site-packages/GPUtil-1.4.0.dist-info/top_level.txt +1 -0
  2. wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/INSTALLER +1 -0
  3. wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/RECORD +11 -0
  4. wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/REQUESTED +0 -0
  5. wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/WHEEL +6 -0
  6. wemm/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu11-11.7.99.dist-info/INSTALLER +1 -0
  7. wemm/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu11-11.7.99.dist-info/REQUESTED +0 -0
  8. wemm/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu11-11.7.99.dist-info/top_level.txt +1 -0
  9. wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/License.txt +1568 -0
  10. wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/RECORD +23 -0
  11. wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/REQUESTED +0 -0
  12. wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/top_level.txt +1 -0
  13. wemm/lib/python3.10/site-packages/torchmetrics/aggregation.py +408 -0
  14. wemm/lib/python3.10/site-packages/torchmetrics/classification/__init__.py +191 -0
  15. wemm/lib/python3.10/site-packages/torchmetrics/classification/auroc.py +372 -0
  16. wemm/lib/python3.10/site-packages/torchmetrics/classification/average_precision.py +376 -0
  17. wemm/lib/python3.10/site-packages/torchmetrics/classification/calibration_error.py +277 -0
  18. wemm/lib/python3.10/site-packages/torchmetrics/classification/cohen_kappa.py +232 -0
  19. wemm/lib/python3.10/site-packages/torchmetrics/classification/confusion_matrix.py +375 -0
  20. wemm/lib/python3.10/site-packages/torchmetrics/classification/dice.py +237 -0
  21. wemm/lib/python3.10/site-packages/torchmetrics/classification/hamming.py +368 -0
  22. wemm/lib/python3.10/site-packages/torchmetrics/classification/precision_recall.py +701 -0
  23. wemm/lib/python3.10/site-packages/torchmetrics/classification/precision_recall_curve.py +489 -0
  24. wemm/lib/python3.10/site-packages/torchmetrics/classification/ranking.py +242 -0
  25. wemm/lib/python3.10/site-packages/torchmetrics/collections.py +483 -0
  26. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__init__.py +125 -0
  27. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__pycache__/auroc.cpython-310.pyc +0 -0
  28. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__pycache__/average_precision.cpython-310.pyc +0 -0
  29. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__pycache__/calibration_error.cpython-310.pyc +0 -0
  30. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__pycache__/jaccard.cpython-310.pyc +0 -0
  31. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__pycache__/precision_recall_curve.cpython-310.pyc +0 -0
  32. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__pycache__/ranking.cpython-310.pyc +0 -0
  33. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__pycache__/roc.cpython-310.pyc +0 -0
  34. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/accuracy.py +428 -0
  35. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/auroc.py +463 -0
  36. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/calibration_error.py +356 -0
  37. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/cohen_kappa.py +266 -0
  38. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/confusion_matrix.py +647 -0
  39. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/dice.py +207 -0
  40. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/exact_match.py +241 -0
  41. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/f_beta.py +775 -0
  42. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/hinge.py +282 -0
  43. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/matthews_corrcoef.py +246 -0
  44. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/precision_recall.py +738 -0
  45. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/precision_recall_curve.py +834 -0
  46. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/recall_at_fixed_precision.py +401 -0
  47. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/roc.py +496 -0
  48. wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/stat_scores.py +1117 -0
  49. wemm/lib/python3.10/site-packages/torchmetrics/functional/nominal/__init__.py +20 -0
  50. wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/concordance.py +68 -0
wemm/lib/python3.10/site-packages/GPUtil-1.4.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ GPUtil
wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/RECORD ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ aiosignal-1.3.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ aiosignal-1.3.2.dist-info/LICENSE,sha256=b9UkPpLdf5jsacesN3co50kFcJ_1J6W_mNbQJjwE9bY,11332
3
+ aiosignal-1.3.2.dist-info/METADATA,sha256=TeI_xgZ191qgx37rviEnpMWC0QnYsg_j9EGVivNqqjc,3753
4
+ aiosignal-1.3.2.dist-info/RECORD,,
5
+ aiosignal-1.3.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ aiosignal-1.3.2.dist-info/WHEEL,sha256=pxeNX5JdtCe58PUSYP9upmc7jdRPgvT0Gm9kb1SHlVw,109
7
+ aiosignal-1.3.2.dist-info/top_level.txt,sha256=z45aNOKGDdrI1roqZY3BGXQ22kJFPHBmVdwtLYLtXC0,10
8
+ aiosignal/__init__.py,sha256=1oIrRl6kNpqFh32e7HfMFbMV_35v8sqJJFfnuKgmtEU,867
9
+ aiosignal/__init__.pyi,sha256=xeCddYSS8fZAkz8S4HuKSR2IDe3N7RW_LKcXDPPA1Xk,311
10
+ aiosignal/__pycache__/__init__.cpython-310.pyc,,
11
+ aiosignal/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/REQUESTED ADDED
File without changes
wemm/lib/python3.10/site-packages/aiosignal-1.3.2.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (75.6.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py2-none-any
5
+ Tag: py3-none-any
6
+
wemm/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu11-11.7.99.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
wemm/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu11-11.7.99.dist-info/REQUESTED ADDED
File without changes
wemm/lib/python3.10/site-packages/nvidia_cuda_nvrtc_cu11-11.7.99.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ nvidia
wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/License.txt ADDED
@@ -0,0 +1,1568 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ End User License Agreement
2
+ --------------------------
3
+
4
+
5
+ Preface
6
+ -------
7
+
8
+ The Software License Agreement in Chapter 1 and the Supplement
9
+ in Chapter 2 contain license terms and conditions that govern
10
+ the use of NVIDIA software. By accepting this agreement, you
11
+ agree to comply with all the terms and conditions applicable
12
+ to the product(s) included herein.
13
+
14
+
15
+ NVIDIA Driver
16
+
17
+
18
+ Description
19
+
20
+ This package contains the operating system driver and
21
+ fundamental system software components for NVIDIA GPUs.
22
+
23
+
24
+ NVIDIA CUDA Toolkit
25
+
26
+
27
+ Description
28
+
29
+ The NVIDIA CUDA Toolkit provides command-line and graphical
30
+ tools for building, debugging and optimizing the performance
31
+ of applications accelerated by NVIDIA GPUs, runtime and math
32
+ libraries, and documentation including programming guides,
33
+ user manuals, and API references.
34
+
35
+
36
+ Default Install Location of CUDA Toolkit
37
+
38
+ Windows platform:
39
+
40
+ %ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#
41
+
42
+ Linux platform:
43
+
44
+ /usr/local/cuda-#.#
45
+
46
+ Mac platform:
47
+
48
+ /Developer/NVIDIA/CUDA-#.#
49
+
50
+
51
+ NVIDIA CUDA Samples
52
+
53
+
54
+ Description
55
+
56
+ This package includes over 100+ CUDA examples that demonstrate
57
+ various CUDA programming principles, and efficient CUDA
58
+ implementation of algorithms in specific application domains.
59
+
60
+
61
+ Default Install Location of CUDA Samples
62
+
63
+ Windows platform:
64
+
65
+ %ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#
66
+
67
+ Linux platform:
68
+
69
+ /usr/local/cuda-#.#/samples
70
+
71
+ and
72
+
73
+ $HOME/NVIDIA_CUDA-#.#_Samples
74
+
75
+ Mac platform:
76
+
77
+ /Developer/NVIDIA/CUDA-#.#/samples
78
+
79
+
80
+ NVIDIA Nsight Visual Studio Edition (Windows only)
81
+
82
+
83
+ Description
84
+
85
+ NVIDIA Nsight Development Platform, Visual Studio Edition is a
86
+ development environment integrated into Microsoft Visual
87
+ Studio that provides tools for debugging, profiling, analyzing
88
+ and optimizing your GPU computing and graphics applications.
89
+
90
+
91
+ Default Install Location of Nsight Visual Studio Edition
92
+
93
+ Windows platform:
94
+
95
+ %ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#
96
+
97
+
98
+ 1. License Agreement for NVIDIA Software Development Kits
99
+ ---------------------------------------------------------
100
+
101
+
102
+ Release Date: July 26, 2018
103
+ ---------------------------
104
+
105
+
106
+ Important Notice — Read before downloading,
107
+ copying or using the licensed software:
108
+ -------------------------------------------------------
109
+
110
+ This license agreement, including exhibits attached
111
+ (“Agreement”) is a legal agreement between you and NVIDIA
112
+ Corporation ("NVIDIA") and governs your use of a NVIDIA
113
+ software development kit (“SDK”).
114
+
115
+ Each SDK has its own set of software and materials, but here
116
+ is a description of the types of items that may be included in
117
+ a SDK: source code, header files, APIs, data sets and assets
118
+ (examples include images, textures, models, scenes, videos,
119
+ native API input/output files), binary software, sample code,
120
+ libraries, utility programs, programming code and
121
+ documentation.
122
+
123
+ This Agreement can be accepted only by an adult of legal age
124
+ of majority in the country in which the SDK is used.
125
+
126
+ If you are entering into this Agreement on behalf of a company
127
+ or other legal entity, you represent that you have the legal
128
+ authority to bind the entity to this Agreement, in which case
129
+ “you” will mean the entity you represent.
130
+
131
+ If you don’t have the required age or authority to accept
132
+ this Agreement, or if you don’t accept all the terms and
133
+ conditions of this Agreement, do not download, install or use
134
+ the SDK.
135
+
136
+ You agree to use the SDK only for purposes that are permitted
137
+ by (a) this Agreement, and (b) any applicable law, regulation
138
+ or generally accepted practices or guidelines in the relevant
139
+ jurisdictions.
140
+
141
+
142
+ 1.1. License
143
+
144
+
145
+ 1.1.1. License Grant
146
+
147
+ Subject to the terms of this Agreement, NVIDIA hereby grants
148
+ you a non-exclusive, non-transferable license, without the
149
+ right to sublicense (except as expressly provided in this
150
+ Agreement) to:
151
+
152
+ 1. Install and use the SDK,
153
+
154
+ 2. Modify and create derivative works of sample source code
155
+ delivered in the SDK, and
156
+
157
+ 3. Distribute those portions of the SDK that are identified
158
+ in this Agreement as distributable, as incorporated in
159
+ object code format into a software application that meets
160
+ the distribution requirements indicated in this Agreement.
161
+
162
+
163
+ 1.1.2. Distribution Requirements
164
+
165
+ These are the distribution requirements for you to exercise
166
+ the distribution grant:
167
+
168
+ 1. Your application must have material additional
169
+ functionality, beyond the included portions of the SDK.
170
+
171
+ 2. The distributable portions of the SDK shall only be
172
+ accessed by your application.
173
+
174
+ 3. The following notice shall be included in modifications
175
+ and derivative works of sample source code distributed:
176
+ “This software contains source code provided by NVIDIA
177
+ Corporation.”
178
+
179
+ 4. Unless a developer tool is identified in this Agreement
180
+ as distributable, it is delivered for your internal use
181
+ only.
182
+
183
+ 5. The terms under which you distribute your application
184
+ must be consistent with the terms of this Agreement,
185
+ including (without limitation) terms relating to the
186
+ license grant and license restrictions and protection of
187
+ NVIDIA’s intellectual property rights. Additionally, you
188
+ agree that you will protect the privacy, security and
189
+ legal rights of your application users.
190
+
191
+ 6. You agree to notify NVIDIA in writing of any known or
192
+ suspected distribution or use of the SDK not in compliance
193
+ with the requirements of this Agreement, and to enforce
194
+ the terms of your agreements with respect to distributed
195
+ SDK.
196
+
197
+
198
+ 1.1.3. Authorized Users
199
+
200
+ You may allow employees and contractors of your entity or of
201
+ your subsidiary(ies) to access and use the SDK from your
202
+ secure network to perform work on your behalf.
203
+
204
+ If you are an academic institution you may allow users
205
+ enrolled or employed by the academic institution to access and
206
+ use the SDK from your secure network.
207
+
208
+ You are responsible for the compliance with the terms of this
209
+ Agreement by your authorized users. If you become aware that
210
+ your authorized users didn’t follow the terms of this
211
+ Agreement, you agree to take reasonable steps to resolve the
212
+ non-compliance and prevent new occurrences.
213
+
214
+
215
+ 1.1.4. Pre-Release SDK
216
+
217
+ The SDK versions identified as alpha, beta, preview or
218
+ otherwise as pre-release, may not be fully functional, may
219
+ contain errors or design flaws, and may have reduced or
220
+ different security, privacy, accessibility, availability, and
221
+ reliability standards relative to commercial versions of
222
+ NVIDIA software and materials. Use of a pre-release SDK may
223
+ result in unexpected results, loss of data, project delays or
224
+ other unpredictable damage or loss.
225
+
226
+ You may use a pre-release SDK at your own risk, understanding
227
+ that pre-release SDKs are not intended for use in production
228
+ or business-critical systems.
229
+
230
+ NVIDIA may choose not to make available a commercial version
231
+ of any pre-release SDK. NVIDIA may also choose to abandon
232
+ development and terminate the availability of a pre-release
233
+ SDK at any time without liability.
234
+
235
+
236
+ 1.1.5. Updates
237
+
238
+ NVIDIA may, at its option, make available patches, workarounds
239
+ or other updates to this SDK. Unless the updates are provided
240
+ with their separate governing terms, they are deemed part of
241
+ the SDK licensed to you as provided in this Agreement. You
242
+ agree that the form and content of the SDK that NVIDIA
243
+ provides may change without prior notice to you. While NVIDIA
244
+ generally maintains compatibility between versions, NVIDIA may
245
+ in some cases make changes that introduce incompatibilities in
246
+ future versions of the SDK.
247
+
248
+
249
+ 1.1.6. Third Party Licenses
250
+
251
+ The SDK may come bundled with, or otherwise include or be
252
+ distributed with, third party software licensed by a NVIDIA
253
+ supplier and/or open source software provided under an open
254
+ source license. Use of third party software is subject to the
255
+ third-party license terms, or in the absence of third party
256
+ terms, the terms of this Agreement. Copyright to third party
257
+ software is held by the copyright holders indicated in the
258
+ third-party software or license.
259
+
260
+
261
+ 1.1.7. Reservation of Rights
262
+
263
+ NVIDIA reserves all rights, title, and interest in and to the
264
+ SDK, not expressly granted to you under this Agreement.
265
+
266
+
267
+ 1.2. Limitations
268
+
269
+ The following license limitations apply to your use of the
270
+ SDK:
271
+
272
+ 1. You may not reverse engineer, decompile or disassemble,
273
+ or remove copyright or other proprietary notices from any
274
+ portion of the SDK or copies of the SDK.
275
+
276
+ 2. Except as expressly provided in this Agreement, you may
277
+ not copy, sell, rent, sublicense, transfer, distribute,
278
+ modify, or create derivative works of any portion of the
279
+ SDK. For clarity, you may not distribute or sublicense the
280
+ SDK as a stand-alone product.
281
+
282
+ 3. Unless you have an agreement with NVIDIA for this
283
+ purpose, you may not indicate that an application created
284
+ with the SDK is sponsored or endorsed by NVIDIA.
285
+
286
+ 4. You may not bypass, disable, or circumvent any
287
+ encryption, security, digital rights management or
288
+ authentication mechanism in the SDK.
289
+
290
+ 5. You may not use the SDK in any manner that would cause it
291
+ to become subject to an open source software license. As
292
+ examples, licenses that require as a condition of use,
293
+ modification, and/or distribution that the SDK be:
294
+
295
+ a. Disclosed or distributed in source code form;
296
+
297
+ b. Licensed for the purpose of making derivative works;
298
+ or
299
+
300
+ c. Redistributable at no charge.
301
+
302
+ 6. Unless you have an agreement with NVIDIA for this
303
+ purpose, you may not use the SDK with any system or
304
+ application where the use or failure of the system or
305
+ application can reasonably be expected to threaten or
306
+ result in personal injury, death, or catastrophic loss.
307
+ Examples include use in avionics, navigation, military,
308
+ medical, life support or other life critical applications.
309
+ NVIDIA does not design, test or manufacture the SDK for
310
+ these critical uses and NVIDIA shall not be liable to you
311
+ or any third party, in whole or in part, for any claims or
312
+ damages arising from such uses.
313
+
314
+ 7. You agree to defend, indemnify and hold harmless NVIDIA
315
+ and its affiliates, and their respective employees,
316
+ contractors, agents, officers and directors, from and
317
+ against any and all claims, damages, obligations, losses,
318
+ liabilities, costs or debt, fines, restitutions and
319
+ expenses (including but not limited to attorney’s fees
320
+ and costs incident to establishing the right of
321
+ indemnification) arising out of or related to your use of
322
+ the SDK outside of the scope of this Agreement, or not in
323
+ compliance with its terms.
324
+
325
+
326
+ 1.3. Ownership
327
+
328
+ 1. NVIDIA or its licensors hold all rights, title and
329
+ interest in and to the SDK and its modifications and
330
+ derivative works, including their respective intellectual
331
+ property rights, subject to your rights described in this
332
+ section. This SDK may include software and materials from
333
+ NVIDIA’s licensors, and these licensors are intended
334
+ third party beneficiaries that may enforce this Agreement
335
+ with respect to their intellectual property rights.
336
+
337
+ 2. You hold all rights, title and interest in and to your
338
+ applications and your derivative works of the sample
339
+ source code delivered in the SDK, including their
340
+ respective intellectual property rights, subject to
341
+ NVIDIA’s rights described in this section.
342
+
343
+ 3. You may, but don’t have to, provide to NVIDIA
344
+ suggestions, feature requests or other feedback regarding
345
+ the SDK, including possible enhancements or modifications
346
+ to the SDK. For any feedback that you voluntarily provide,
347
+ you hereby grant NVIDIA and its affiliates a perpetual,
348
+ non-exclusive, worldwide, irrevocable license to use,
349
+ reproduce, modify, license, sublicense (through multiple
350
+ tiers of sublicensees), and distribute (through multiple
351
+ tiers of distributors) it without the payment of any
352
+ royalties or fees to you. NVIDIA will use feedback at its
353
+ choice. NVIDIA is constantly looking for ways to improve
354
+ its products, so you may send feedback to NVIDIA through
355
+ the developer portal at https://developer.nvidia.com.
356
+
357
+
358
+ 1.4. No Warranties
359
+
360
+ THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL
361
+ FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND
362
+ ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND
363
+ OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING,
364
+ BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS
365
+ FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE
366
+ ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO
367
+ WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF
368
+ DEALING OR COURSE OF TRADE.
369
+
370
+
371
+ 1.5. Limitation of Liability
372
+
373
+ TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS
374
+ AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
375
+ PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS
376
+ OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF
377
+ PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION
378
+ WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK,
379
+ WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH
380
+ OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE),
381
+ PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF
382
+ LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES
383
+ TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS
384
+ AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE
385
+ NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS
386
+ LIMIT.
387
+
388
+ These exclusions and limitations of liability shall apply
389
+ regardless if NVIDIA or its affiliates have been advised of
390
+ the possibility of such damages, and regardless of whether a
391
+ remedy fails its essential purpose. These exclusions and
392
+ limitations of liability form an essential basis of the
393
+ bargain between the parties, and, absent any of these
394
+ exclusions or limitations of liability, the provisions of this
395
+ Agreement, including, without limitation, the economic terms,
396
+ would be substantially different.
397
+
398
+
399
+ 1.6. Termination
400
+
401
+ 1. This Agreement will continue to apply until terminated by
402
+ either you or NVIDIA as described below.
403
+
404
+ 2. If you want to terminate this Agreement, you may do so by
405
+ stopping to use the SDK.
406
+
407
+ 3. NVIDIA may, at any time, terminate this Agreement if:
408
+
409
+ a. (i) you fail to comply with any term of this
410
+ Agreement and the non-compliance is not fixed within
411
+ thirty (30) days following notice from NVIDIA (or
412
+ immediately if you violate NVIDIA’s intellectual
413
+ property rights);
414
+
415
+ b. (ii) you commence or participate in any legal
416
+ proceeding against NVIDIA with respect to the SDK; or
417
+
418
+ c. (iii) NVIDIA decides to no longer provide the SDK in
419
+ a country or, in NVIDIA’s sole discretion, the
420
+ continued use of it is no longer commercially viable.
421
+
422
+ 4. Upon any termination of this Agreement, you agree to
423
+ promptly discontinue use of the SDK and destroy all copies
424
+ in your possession or control. Your prior distributions in
425
+ accordance with this Agreement are not affected by the
426
+ termination of this Agreement. Upon written request, you
427
+ will certify in writing that you have complied with your
428
+ commitments under this section. Upon any termination of
429
+ this Agreement all provisions survive except for the
430
+ license grant provisions.
431
+
432
+
433
+ 1.7. General
434
+
435
+ If you wish to assign this Agreement or your rights and
436
+ obligations, including by merger, consolidation, dissolution
437
+ or operation of law, contact NVIDIA to ask for permission. Any
438
+ attempted assignment not approved by NVIDIA in writing shall
439
+ be void and of no effect. NVIDIA may assign, delegate or
440
+ transfer this Agreement and its rights and obligations, and if
441
+ to a non-affiliate you will be notified.
442
+
443
+ You agree to cooperate with NVIDIA and provide reasonably
444
+ requested information to verify your compliance with this
445
+ Agreement.
446
+
447
+ This Agreement will be governed in all respects by the laws of
448
+ the United States and of the State of Delaware as those laws
449
+ are applied to contracts entered into and performed entirely
450
+ within Delaware by Delaware residents, without regard to the
451
+ conflicts of laws principles. The United Nations Convention on
452
+ Contracts for the International Sale of Goods is specifically
453
+ disclaimed. You agree to all terms of this Agreement in the
454
+ English language.
455
+
456
+ The state or federal courts residing in Santa Clara County,
457
+ California shall have exclusive jurisdiction over any dispute
458
+ or claim arising out of this Agreement. Notwithstanding this,
459
+ you agree that NVIDIA shall still be allowed to apply for
460
+ injunctive remedies or an equivalent type of urgent legal
461
+ relief in any jurisdiction.
462
+
463
+ If any court of competent jurisdiction determines that any
464
+ provision of this Agreement is illegal, invalid or
465
+ unenforceable, such provision will be construed as limited to
466
+ the extent necessary to be consistent with and fully
467
+ enforceable under the law and the remaining provisions will
468
+ remain in full force and effect. Unless otherwise specified,
469
+ remedies are cumulative.
470
+
471
+ Each party acknowledges and agrees that the other is an
472
+ independent contractor in the performance of this Agreement.
473
+
474
+ The SDK has been developed entirely at private expense and is
475
+ “commercial items” consisting of “commercial computer
476
+ software” and “commercial computer software
477
+ documentation” provided with RESTRICTED RIGHTS. Use,
478
+ duplication or disclosure by the U.S. Government or a U.S.
479
+ Government subcontractor is subject to the restrictions in
480
+ this Agreement pursuant to DFARS 227.7202-3(a) or as set forth
481
+ in subparagraphs (c)(1) and (2) of the Commercial Computer
482
+ Software - Restricted Rights clause at FAR 52.227-19, as
483
+ applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas
484
+ Expressway, Santa Clara, CA 95051.
485
+
486
+ The SDK is subject to United States export laws and
487
+ regulations. You agree that you will not ship, transfer or
488
+ export the SDK into any country, or use the SDK in any manner,
489
+ prohibited by the United States Bureau of Industry and
490
+ Security or economic sanctions regulations administered by the
491
+ U.S. Department of Treasury’s Office of Foreign Assets
492
+ Control (OFAC), or any applicable export laws, restrictions or
493
+ regulations. These laws include restrictions on destinations,
494
+ end users and end use. By accepting this Agreement, you
495
+ confirm that you are not a resident or citizen of any country
496
+ currently embargoed by the U.S. and that you are not otherwise
497
+ prohibited from receiving the SDK.
498
+
499
+ Any notice delivered by NVIDIA to you under this Agreement
500
+ will be delivered via mail, email or fax. You agree that any
501
+ notices that NVIDIA sends you electronically will satisfy any
502
+ legal communication requirements. Please direct your legal
503
+ notices or other correspondence to NVIDIA Corporation, 2788
504
+ San Tomas Expressway, Santa Clara, California 95051, United
505
+ States of America, Attention: Legal Department.
506
+
507
+ This Agreement and any exhibits incorporated into this
508
+ Agreement constitute the entire agreement of the parties with
509
+ respect to the subject matter of this Agreement and supersede
510
+ all prior negotiations or documentation exchanged between the
511
+ parties relating to this SDK license. Any additional and/or
512
+ conflicting terms on documents issued by you are null, void,
513
+ and invalid. Any amendment or waiver under this Agreement
514
+ shall be in writing and signed by representatives of both
515
+ parties.
516
+
517
+
518
+ 2. CUDA Toolkit Supplement to Software License Agreement for
519
+ NVIDIA Software Development Kits
520
+ ------------------------------------------------------------
521
+
522
+
523
+ Release date: August 16, 2018
524
+ -----------------------------
525
+
526
+ The terms in this supplement govern your use of the NVIDIA
527
+ CUDA Toolkit SDK under the terms of your license agreement
528
+ (“Agreement”) as modified by this supplement. Capitalized
529
+ terms used but not defined below have the meaning assigned to
530
+ them in the Agreement.
531
+
532
+ This supplement is an exhibit to the Agreement and is
533
+ incorporated as an integral part of the Agreement. In the
534
+ event of conflict between the terms in this supplement and the
535
+ terms in the Agreement, the terms in this supplement govern.
536
+
537
+
538
+ 2.1. License Scope
539
+
540
+ The SDK is licensed for you to develop applications only for
541
+ use in systems with NVIDIA GPUs.
542
+
543
+
544
+ 2.2. Distribution
545
+
546
+ The portions of the SDK that are distributable under the
547
+ Agreement are listed in Attachment A.
548
+
549
+
550
+ 2.3. Operating Systems
551
+
552
+ Those portions of the SDK designed exclusively for use on the
553
+ Linux or FreeBSD operating systems, or other operating systems
554
+ derived from the source code to these operating systems, may
555
+ be copied and redistributed for use in accordance with this
556
+ Agreement, provided that the object code files are not
557
+ modified in any way (except for unzipping of compressed
558
+ files).
559
+
560
+
561
+ 2.4. Audio and Video Encoders and Decoders
562
+
563
+ You acknowledge and agree that it is your sole responsibility
564
+ to obtain any additional third-party licenses required to
565
+ make, have made, use, have used, sell, import, and offer for
566
+ sale your products or services that include or incorporate any
567
+ third-party software and content relating to audio and/or
568
+ video encoders and decoders from, including but not limited
569
+ to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A.,
570
+ MPEG-LA, and Coding Technologies. NVIDIA does not grant to you
571
+ under this Agreement any necessary patent or other rights with
572
+ respect to any audio and/or video encoders and decoders.
573
+
574
+
575
+ 2.5. Licensing
576
+
577
+ If the distribution terms in this Agreement are not suitable
578
+ for your organization, or for any questions regarding this
579
+ Agreement, please contact NVIDIA at
580
+ nvidia-compute-license-questions@nvidia.com.
581
+
582
+
583
+ 2.6. Attachment A
584
+
585
+ The following portions of the SDK are distributable under the
586
+ Agreement:
587
+
588
+ Component
589
+
590
+ CUDA Runtime
591
+
592
+ Windows
593
+
594
+ cudart.dll, cudart_static.lib, cudadevrt.lib
595
+
596
+ Mac OSX
597
+
598
+ libcudart.dylib, libcudart_static.a, libcudadevrt.a
599
+
600
+ Linux
601
+
602
+ libcudart.so, libcudart_static.a, libcudadevrt.a
603
+
604
+ Android
605
+
606
+ libcudart.so, libcudart_static.a, libcudadevrt.a
607
+
608
+ Component
609
+
610
+ CUDA FFT Library
611
+
612
+ Windows
613
+
614
+ cufft.dll, cufftw.dll, cufft.lib, cufftw.lib
615
+
616
+ Mac OSX
617
+
618
+ libcufft.dylib, libcufft_static.a, libcufftw.dylib,
619
+ libcufftw_static.a
620
+
621
+ Linux
622
+
623
+ libcufft.so, libcufft_static.a, libcufftw.so,
624
+ libcufftw_static.a
625
+
626
+ Android
627
+
628
+ libcufft.so, libcufft_static.a, libcufftw.so,
629
+ libcufftw_static.a
630
+
631
+ Component
632
+
633
+ CUDA BLAS Library
634
+
635
+ Windows
636
+
637
+ cublas.dll, cublasLt.dll
638
+
639
+ Mac OSX
640
+
641
+ libcublas.dylib, libcublasLt.dylib, libcublas_static.a,
642
+ libcublasLt_static.a
643
+
644
+ Linux
645
+
646
+ libcublas.so, libcublasLt.so, libcublas_static.a,
647
+ libcublasLt_static.a
648
+
649
+ Android
650
+
651
+ libcublas.so, libcublasLt.so, libcublas_static.a,
652
+ libcublasLt_static.a
653
+
654
+ Component
655
+
656
+ NVIDIA "Drop-in" BLAS Library
657
+
658
+ Windows
659
+
660
+ nvblas.dll
661
+
662
+ Mac OSX
663
+
664
+ libnvblas.dylib
665
+
666
+ Linux
667
+
668
+ libnvblas.so
669
+
670
+ Component
671
+
672
+ CUDA Sparse Matrix Library
673
+
674
+ Windows
675
+
676
+ cusparse.dll, cusparse.lib
677
+
678
+ Mac OSX
679
+
680
+ libcusparse.dylib, libcusparse_static.a
681
+
682
+ Linux
683
+
684
+ libcusparse.so, libcusparse_static.a
685
+
686
+ Android
687
+
688
+ libcusparse.so, libcusparse_static.a
689
+
690
+ Component
691
+
692
+ CUDA Linear Solver Library
693
+
694
+ Windows
695
+
696
+ cusolver.dll, cusolver.lib
697
+
698
+ Mac OSX
699
+
700
+ libcusolver.dylib, libcusolver_static.a
701
+
702
+ Linux
703
+
704
+ libcusolver.so, libcusolver_static.a
705
+
706
+ Android
707
+
708
+ libcusolver.so, libcusolver_static.a
709
+
710
+ Component
711
+
712
+ CUDA Random Number Generation Library
713
+
714
+ Windows
715
+
716
+ curand.dll, curand.lib
717
+
718
+ Mac OSX
719
+
720
+ libcurand.dylib, libcurand_static.a
721
+
722
+ Linux
723
+
724
+ libcurand.so, libcurand_static.a
725
+
726
+ Android
727
+
728
+ libcurand.so, libcurand_static.a
729
+
730
+ Component
731
+
732
+ CUDA Accelerated Graph Library
733
+
734
+ Component
735
+
736
+ NVIDIA Performance Primitives Library
737
+
738
+ Windows
739
+
740
+ nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll,
741
+ nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll,
742
+ nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib,
743
+ nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll,
744
+ nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
745
+
746
+ Mac OSX
747
+
748
+ libnppc.dylib, libnppc_static.a, libnppial.dylib,
749
+ libnppial_static.a, libnppicc.dylib, libnppicc_static.a,
750
+ libnppicom.dylib, libnppicom_static.a, libnppidei.dylib,
751
+ libnppidei_static.a, libnppif.dylib, libnppif_static.a,
752
+ libnppig.dylib, libnppig_static.a, libnppim.dylib,
753
+ libnppisu_static.a, libnppitc.dylib, libnppitc_static.a,
754
+ libnpps.dylib, libnpps_static.a
755
+
756
+ Linux
757
+
758
+ libnppc.so, libnppc_static.a, libnppial.so,
759
+ libnppial_static.a, libnppicc.so, libnppicc_static.a,
760
+ libnppicom.so, libnppicom_static.a, libnppidei.so,
761
+ libnppidei_static.a, libnppif.so, libnppif_static.a
762
+ libnppig.so, libnppig_static.a, libnppim.so,
763
+ libnppim_static.a, libnppist.so, libnppist_static.a,
764
+ libnppisu.so, libnppisu_static.a, libnppitc.so
765
+ libnppitc_static.a, libnpps.so, libnpps_static.a
766
+
767
+ Android
768
+
769
+ libnppc.so, libnppc_static.a, libnppial.so,
770
+ libnppial_static.a, libnppicc.so, libnppicc_static.a,
771
+ libnppicom.so, libnppicom_static.a, libnppidei.so,
772
+ libnppidei_static.a, libnppif.so, libnppif_static.a
773
+ libnppig.so, libnppig_static.a, libnppim.so,
774
+ libnppim_static.a, libnppist.so, libnppist_static.a,
775
+ libnppisu.so, libnppisu_static.a, libnppitc.so
776
+ libnppitc_static.a, libnpps.so, libnpps_static.a
777
+
778
+ Component
779
+
780
+ NVIDIA JPEG Library
781
+
782
+ Linux
783
+
784
+ libnvjpeg.so, libnvjpeg_static.a
785
+
786
+ Component
787
+
788
+ Internal common library required for statically linking to
789
+ cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
790
+
791
+ Mac OSX
792
+
793
+ libculibos.a
794
+
795
+ Linux
796
+
797
+ libculibos.a
798
+
799
+ Component
800
+
801
+ NVIDIA Runtime Compilation Library and Header
802
+
803
+ All
804
+
805
+ nvrtc.h
806
+
807
+ Windows
808
+
809
+ nvrtc.dll, nvrtc-builtins.dll
810
+
811
+ Mac OSX
812
+
813
+ libnvrtc.dylib, libnvrtc-builtins.dylib
814
+
815
+ Linux
816
+
817
+ libnvrtc.so, libnvrtc-builtins.so
818
+
819
+ Component
820
+
821
+ NVIDIA Optimizing Compiler Library
822
+
823
+ Windows
824
+
825
+ nvvm.dll
826
+
827
+ Mac OSX
828
+
829
+ libnvvm.dylib
830
+
831
+ Linux
832
+
833
+ libnvvm.so
834
+
835
+ Component
836
+
837
+ NVIDIA Common Device Math Functions Library
838
+
839
+ Windows
840
+
841
+ libdevice.10.bc
842
+
843
+ Mac OSX
844
+
845
+ libdevice.10.bc
846
+
847
+ Linux
848
+
849
+ libdevice.10.bc
850
+
851
+ Component
852
+
853
+ CUDA Occupancy Calculation Header Library
854
+
855
+ All
856
+
857
+ cuda_occupancy.h
858
+
859
+ Component
860
+
861
+ CUDA Half Precision Headers
862
+
863
+ All
864
+
865
+ cuda_fp16.h, cuda_fp16.hpp
866
+
867
+ Component
868
+
869
+ CUDA Profiling Tools Interface (CUPTI) Library
870
+
871
+ Windows
872
+
873
+ cupti.dll
874
+
875
+ Mac OSX
876
+
877
+ libcupti.dylib
878
+
879
+ Linux
880
+
881
+ libcupti.so
882
+
883
+ Component
884
+
885
+ NVIDIA Tools Extension Library
886
+
887
+ Windows
888
+
889
+ nvToolsExt.dll, nvToolsExt.lib
890
+
891
+ Mac OSX
892
+
893
+ libnvToolsExt.dylib
894
+
895
+ Linux
896
+
897
+ libnvToolsExt.so
898
+
899
+ Component
900
+
901
+ NVIDIA CUDA Driver Libraries
902
+
903
+ Linux
904
+
905
+ libcuda.so, libnvidia-fatbinaryloader.so,
906
+ libnvidia-ptxjitcompiler.so
907
+
908
+ The NVIDIA CUDA Driver Libraries are only distributable in
909
+ applications that meet this criteria:
910
+
911
+ 1. The application was developed starting from a NVIDIA CUDA
912
+ container obtained from Docker Hub or the NVIDIA GPU
913
+ Cloud, and
914
+
915
+ 2. The resulting application is packaged as a Docker
916
+ container and distributed to users on Docker Hub or the
917
+ NVIDIA GPU Cloud only.
918
+
919
+
920
+ 2.7. Attachment B
921
+
922
+
923
+ Additional Licensing Obligations
924
+
925
+ The following third party components included in the SOFTWARE
926
+ are licensed to Licensee pursuant to the following terms and
927
+ conditions:
928
+
929
+ 1. Licensee's use of the GDB third party component is
930
+ subject to the terms and conditions of GNU GPL v3:
931
+
932
+ This product includes copyrighted third-party software licensed
933
+ under the terms of the GNU General Public License v3 ("GPL v3").
934
+ All third-party software packages are copyright by their respective
935
+ authors. GPL v3 terms and conditions are hereby incorporated into
936
+ the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt
937
+
938
+ Consistent with these licensing requirements, the software
939
+ listed below is provided under the terms of the specified
940
+ open source software licenses. To obtain source code for
941
+ software provided under licenses that require
942
+ redistribution of source code, including the GNU General
943
+ Public License (GPL) and GNU Lesser General Public License
944
+ (LGPL), contact oss-requests@nvidia.com. This offer is
945
+ valid for a period of three (3) years from the date of the
946
+ distribution of this product by NVIDIA CORPORATION.
947
+
948
+ Component License
949
+ CUDA-GDB GPL v3
950
+
951
+ 2. Licensee represents and warrants that any and all third
952
+ party licensing and/or royalty payment obligations in
953
+ connection with Licensee's use of the H.264 video codecs
954
+ are solely the responsibility of Licensee.
955
+
956
+ 3. Licensee's use of the Thrust library is subject to the
957
+ terms and conditions of the Apache License Version 2.0.
958
+ All third-party software packages are copyright by their
959
+ respective authors. Apache License Version 2.0 terms and
960
+ conditions are hereby incorporated into the Agreement by
961
+ this reference.
962
+ http://www.apache.org/licenses/LICENSE-2.0.html
963
+
964
+ In addition, Licensee acknowledges the following notice:
965
+ Thrust includes source code from the Boost Iterator,
966
+ Tuple, System, and Random Number libraries.
967
+
968
+ Boost Software License - Version 1.0 - August 17th, 2003
969
+ . . . .
970
+
971
+ Permission is hereby granted, free of charge, to any person or
972
+ organization obtaining a copy of the software and accompanying
973
+ documentation covered by this license (the "Software") to use,
974
+ reproduce, display, distribute, execute, and transmit the Software,
975
+ and to prepare derivative works of the Software, and to permit
976
+ third-parties to whom the Software is furnished to do so, all
977
+ subject to the following:
978
+
979
+ The copyright notices in the Software and this entire statement,
980
+ including the above license grant, this restriction and the following
981
+ disclaimer, must be included in all copies of the Software, in whole
982
+ or in part, and all derivative works of the Software, unless such
983
+ copies or derivative works are solely in the form of machine-executable
984
+ object code generated by a source language processor.
985
+
986
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
987
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
988
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
989
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
990
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
991
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
992
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
993
+ OTHER DEALINGS IN THE SOFTWARE.
994
+
995
+ 4. Licensee's use of the LLVM third party component is
996
+ subject to the following terms and conditions:
997
+
998
+ ======================================================
999
+ LLVM Release License
1000
+ ======================================================
1001
+ University of Illinois/NCSA
1002
+ Open Source License
1003
+
1004
+ Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
1005
+ All rights reserved.
1006
+
1007
+ Developed by:
1008
+
1009
+ LLVM Team
1010
+
1011
+ University of Illinois at Urbana-Champaign
1012
+
1013
+ http://llvm.org
1014
+
1015
+ Permission is hereby granted, free of charge, to any person obtaining a copy
1016
+ of this software and associated documentation files (the "Software"), to
1017
+ deal with the Software without restriction, including without limitation the
1018
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
1019
+ sell copies of the Software, and to permit persons to whom the Software is
1020
+ furnished to do so, subject to the following conditions:
1021
+
1022
+ * Redistributions of source code must retain the above copyright notice,
1023
+ this list of conditions and the following disclaimers.
1024
+
1025
+ * Redistributions in binary form must reproduce the above copyright
1026
+ notice, this list of conditions and the following disclaimers in the
1027
+ documentation and/or other materials provided with the distribution.
1028
+
1029
+ * Neither the names of the LLVM Team, University of Illinois at Urbana-
1030
+ Champaign, nor the names of its contributors may be used to endorse or
1031
+ promote products derived from this Software without specific prior
1032
+ written permission.
1033
+
1034
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1035
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1036
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
1037
+ THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
1038
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
1039
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
1040
+ DEALINGS WITH THE SOFTWARE.
1041
+
1042
+ 5. Licensee's use (e.g. nvprof) of the PCRE third party
1043
+ component is subject to the following terms and
1044
+ conditions:
1045
+
1046
+ ------------
1047
+ PCRE LICENCE
1048
+ ------------
1049
+ PCRE is a library of functions to support regular expressions whose syntax
1050
+ and semantics are as close as possible to those of the Perl 5 language.
1051
+ Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
1052
+ specified below. The documentation for PCRE, supplied in the "doc"
1053
+ directory, is distributed under the same terms as the software itself. The
1054
+ basic library functions are written in C and are freestanding. Also
1055
+ included in the distribution is a set of C++ wrapper functions, and a just-
1056
+ in-time compiler that can be used to optimize pattern matching. These are
1057
+ both optional features that can be omitted when the library is built.
1058
+
1059
+ THE BASIC LIBRARY FUNCTIONS
1060
+ ---------------------------
1061
+ Written by: Philip Hazel
1062
+ Email local part: ph10
1063
+ Email domain: cam.ac.uk
1064
+ University of Cambridge Computing Service,
1065
+ Cambridge, England.
1066
+ Copyright (c) 1997-2012 University of Cambridge
1067
+ All rights reserved.
1068
+
1069
+ PCRE JUST-IN-TIME COMPILATION SUPPORT
1070
+ -------------------------------------
1071
+ Written by: Zoltan Herczeg
1072
+ Email local part: hzmester
1073
+ Emain domain: freemail.hu
1074
+ Copyright(c) 2010-2012 Zoltan Herczeg
1075
+ All rights reserved.
1076
+
1077
+ STACK-LESS JUST-IN-TIME COMPILER
1078
+ --------------------------------
1079
+ Written by: Zoltan Herczeg
1080
+ Email local part: hzmester
1081
+ Emain domain: freemail.hu
1082
+ Copyright(c) 2009-2012 Zoltan Herczeg
1083
+ All rights reserved.
1084
+
1085
+ THE C++ WRAPPER FUNCTIONS
1086
+ -------------------------
1087
+ Contributed by: Google Inc.
1088
+ Copyright (c) 2007-2012, Google Inc.
1089
+ All rights reserved.
1090
+
1091
+ THE "BSD" LICENCE
1092
+ -----------------
1093
+ Redistribution and use in source and binary forms, with or without
1094
+ modification, are permitted provided that the following conditions are met:
1095
+
1096
+ * Redistributions of source code must retain the above copyright notice,
1097
+ this list of conditions and the following disclaimer.
1098
+
1099
+ * Redistributions in binary form must reproduce the above copyright
1100
+ notice, this list of conditions and the following disclaimer in the
1101
+ documentation and/or other materials provided with the distribution.
1102
+
1103
+ * Neither the name of the University of Cambridge nor the name of Google
1104
+ Inc. nor the names of their contributors may be used to endorse or
1105
+ promote products derived from this software without specific prior
1106
+ written permission.
1107
+
1108
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1109
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1110
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1111
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
1112
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1113
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1114
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
1115
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
1116
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
1117
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1118
+ POSSIBILITY OF SUCH DAMAGE.
1119
+
1120
+ 6. Some of the cuBLAS library routines were written by or
1121
+ derived from code written by Vasily Volkov and are subject
1122
+ to the Modified Berkeley Software Distribution License as
1123
+ follows:
1124
+
1125
+ Copyright (c) 2007-2009, Regents of the University of California
1126
+
1127
+ All rights reserved.
1128
+
1129
+ Redistribution and use in source and binary forms, with or without
1130
+ modification, are permitted provided that the following conditions are
1131
+ met:
1132
+ * Redistributions of source code must retain the above copyright
1133
+ notice, this list of conditions and the following disclaimer.
1134
+ * Redistributions in binary form must reproduce the above
1135
+ copyright notice, this list of conditions and the following
1136
+ disclaimer in the documentation and/or other materials provided
1137
+ with the distribution.
1138
+ * Neither the name of the University of California, Berkeley nor
1139
+ the names of its contributors may be used to endorse or promote
1140
+ products derived from this software without specific prior
1141
+ written permission.
1142
+
1143
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
1144
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1145
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1146
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
1147
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1148
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
1149
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1150
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
1151
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
1152
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1153
+ POSSIBILITY OF SUCH DAMAGE.
1154
+
1155
+ 7. Some of the cuBLAS library routines were written by or
1156
+ derived from code written by Davide Barbieri and are
1157
+ subject to the Modified Berkeley Software Distribution
1158
+ License as follows:
1159
+
1160
+ Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata.
1161
+
1162
+ All rights reserved.
1163
+
1164
+ Redistribution and use in source and binary forms, with or without
1165
+ modification, are permitted provided that the following conditions are
1166
+ met:
1167
+ * Redistributions of source code must retain the above copyright
1168
+ notice, this list of conditions and the following disclaimer.
1169
+ * Redistributions in binary form must reproduce the above
1170
+ copyright notice, this list of conditions and the following
1171
+ disclaimer in the documentation and/or other materials provided
1172
+ with the distribution.
1173
+ * The name of the author may not be used to endorse or promote
1174
+ products derived from this software without specific prior
1175
+ written permission.
1176
+
1177
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
1178
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1179
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1180
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
1181
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1182
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
1183
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1184
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
1185
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
1186
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1187
+ POSSIBILITY OF SUCH DAMAGE.
1188
+
1189
+ 8. Some of the cuBLAS library routines were derived from
1190
+ code developed by the University of Tennessee and are
1191
+ subject to the Modified Berkeley Software Distribution
1192
+ License as follows:
1193
+
1194
+ Copyright (c) 2010 The University of Tennessee.
1195
+
1196
+ All rights reserved.
1197
+
1198
+ Redistribution and use in source and binary forms, with or without
1199
+ modification, are permitted provided that the following conditions are
1200
+ met:
1201
+ * Redistributions of source code must retain the above copyright
1202
+ notice, this list of conditions and the following disclaimer.
1203
+ * Redistributions in binary form must reproduce the above
1204
+ copyright notice, this list of conditions and the following
1205
+ disclaimer listed in this license in the documentation and/or
1206
+ other materials provided with the distribution.
1207
+ * Neither the name of the copyright holders nor the names of its
1208
+ contributors may be used to endorse or promote products derived
1209
+ from this software without specific prior written permission.
1210
+
1211
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1212
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1213
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1214
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1215
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1216
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1217
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1218
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1219
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1220
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1221
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1222
+
1223
+ 9. Some of the cuBLAS library routines were written by or
1224
+ derived from code written by Jonathan Hogg and are subject
1225
+ to the Modified Berkeley Software Distribution License as
1226
+ follows:
1227
+
1228
+ Copyright (c) 2012, The Science and Technology Facilities Council (STFC).
1229
+
1230
+ All rights reserved.
1231
+
1232
+ Redistribution and use in source and binary forms, with or without
1233
+ modification, are permitted provided that the following conditions are
1234
+ met:
1235
+ * Redistributions of source code must retain the above copyright
1236
+ notice, this list of conditions and the following disclaimer.
1237
+ * Redistributions in binary form must reproduce the above
1238
+ copyright notice, this list of conditions and the following
1239
+ disclaimer in the documentation and/or other materials provided
1240
+ with the distribution.
1241
+ * Neither the name of the STFC nor the names of its contributors
1242
+ may be used to endorse or promote products derived from this
1243
+ software without specific prior written permission.
1244
+
1245
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1246
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1247
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1248
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE
1249
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1250
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1251
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
1252
+ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
1253
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
1254
+ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
1255
+ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1256
+
1257
+ 10. Some of the cuBLAS library routines were written by or
1258
+ derived from code written by Ahmad M. Abdelfattah, David
1259
+ Keyes, and Hatem Ltaief, and are subject to the Apache
1260
+ License, Version 2.0, as follows:
1261
+
1262
+ -- (C) Copyright 2013 King Abdullah University of Science and Technology
1263
+ Authors:
1264
+ Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa)
1265
+ David Keyes (david.keyes@kaust.edu.sa)
1266
+ Hatem Ltaief (hatem.ltaief@kaust.edu.sa)
1267
+
1268
+ Redistribution and use in source and binary forms, with or without
1269
+ modification, are permitted provided that the following conditions
1270
+ are met:
1271
+
1272
+ * Redistributions of source code must retain the above copyright
1273
+ notice, this list of conditions and the following disclaimer.
1274
+ * Redistributions in binary form must reproduce the above copyright
1275
+ notice, this list of conditions and the following disclaimer in the
1276
+ documentation and/or other materials provided with the distribution.
1277
+ * Neither the name of the King Abdullah University of Science and
1278
+ Technology nor the names of its contributors may be used to endorse
1279
+ or promote products derived from this software without specific prior
1280
+ written permission.
1281
+
1282
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1283
+ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1284
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1285
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1286
+ HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1287
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1288
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1289
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1290
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1291
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1292
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
1293
+
1294
+ 11. Some of the cuSPARSE library routines were written by or
1295
+ derived from code written by Li-Wen Chang and are subject
1296
+ to the NCSA Open Source License as follows:
1297
+
1298
+ Copyright (c) 2012, University of Illinois.
1299
+
1300
+ All rights reserved.
1301
+
1302
+ Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu
1303
+
1304
+ Permission is hereby granted, free of charge, to any person obtaining
1305
+ a copy of this software and associated documentation files (the
1306
+ "Software"), to deal with the Software without restriction, including
1307
+ without limitation the rights to use, copy, modify, merge, publish,
1308
+ distribute, sublicense, and/or sell copies of the Software, and to
1309
+ permit persons to whom the Software is furnished to do so, subject to
1310
+ the following conditions:
1311
+ * Redistributions of source code must retain the above copyright
1312
+ notice, this list of conditions and the following disclaimer.
1313
+ * Redistributions in binary form must reproduce the above
1314
+ copyright notice, this list of conditions and the following
1315
+ disclaimers in the documentation and/or other materials provided
1316
+ with the distribution.
1317
+ * Neither the names of IMPACT Group, University of Illinois, nor
1318
+ the names of its contributors may be used to endorse or promote
1319
+ products derived from this Software without specific prior
1320
+ written permission.
1321
+
1322
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1323
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1324
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1325
+ NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
1326
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
1327
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
1328
+ IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
1329
+ SOFTWARE.
1330
+
1331
+ 12. Some of the cuRAND library routines were written by or
1332
+ derived from code written by Mutsuo Saito and Makoto
1333
+ Matsumoto and are subject to the following license:
1334
+
1335
+ Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
1336
+ University. All rights reserved.
1337
+
1338
+ Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
1339
+ University and University of Tokyo. All rights reserved.
1340
+
1341
+ Redistribution and use in source and binary forms, with or without
1342
+ modification, are permitted provided that the following conditions are
1343
+ met:
1344
+ * Redistributions of source code must retain the above copyright
1345
+ notice, this list of conditions and the following disclaimer.
1346
+ * Redistributions in binary form must reproduce the above
1347
+ copyright notice, this list of conditions and the following
1348
+ disclaimer in the documentation and/or other materials provided
1349
+ with the distribution.
1350
+ * Neither the name of the Hiroshima University nor the names of
1351
+ its contributors may be used to endorse or promote products
1352
+ derived from this software without specific prior written
1353
+ permission.
1354
+
1355
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1356
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1357
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1358
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1359
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1360
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1361
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1362
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1363
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1364
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1365
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1366
+
1367
+ 13. Some of the cuRAND library routines were derived from
1368
+ code developed by D. E. Shaw Research and are subject to
1369
+ the following license:
1370
+
1371
+ Copyright 2010-2011, D. E. Shaw Research.
1372
+
1373
+ All rights reserved.
1374
+
1375
+ Redistribution and use in source and binary forms, with or without
1376
+ modification, are permitted provided that the following conditions are
1377
+ met:
1378
+ * Redistributions of source code must retain the above copyright
1379
+ notice, this list of conditions, and the following disclaimer.
1380
+ * Redistributions in binary form must reproduce the above
1381
+ copyright notice, this list of conditions, and the following
1382
+ disclaimer in the documentation and/or other materials provided
1383
+ with the distribution.
1384
+ * Neither the name of D. E. Shaw Research nor the names of its
1385
+ contributors may be used to endorse or promote products derived
1386
+ from this software without specific prior written permission.
1387
+
1388
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1389
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1390
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1391
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1392
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1393
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1394
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1395
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1396
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1397
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1398
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1399
+
1400
+ 14. Some of the Math library routines were written by or
1401
+ derived from code developed by Norbert Juffa and are
1402
+ subject to the following license:
1403
+
1404
+ Copyright (c) 2015-2017, Norbert Juffa
1405
+ All rights reserved.
1406
+
1407
+ Redistribution and use in source and binary forms, with or without
1408
+ modification, are permitted provided that the following conditions
1409
+ are met:
1410
+
1411
+ 1. Redistributions of source code must retain the above copyright
1412
+ notice, this list of conditions and the following disclaimer.
1413
+
1414
+ 2. Redistributions in binary form must reproduce the above copyright
1415
+ notice, this list of conditions and the following disclaimer in the
1416
+ documentation and/or other materials provided with the distribution.
1417
+
1418
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1419
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1420
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1421
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1422
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1423
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1424
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1425
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1426
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1427
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1428
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1429
+
1430
+ 15. Licensee's use of the lz4 third party component is
1431
+ subject to the following terms and conditions:
1432
+
1433
+ Copyright (C) 2011-2013, Yann Collet.
1434
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
1435
+
1436
+ Redistribution and use in source and binary forms, with or without
1437
+ modification, are permitted provided that the following conditions are
1438
+ met:
1439
+
1440
+ * Redistributions of source code must retain the above copyright
1441
+ notice, this list of conditions and the following disclaimer.
1442
+ * Redistributions in binary form must reproduce the above
1443
+ copyright notice, this list of conditions and the following disclaimer
1444
+ in the documentation and/or other materials provided with the
1445
+ distribution.
1446
+
1447
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1448
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1449
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1450
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1451
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1452
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1453
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1454
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1455
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1456
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1457
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1458
+
1459
+ 16. The NPP library uses code from the Boost Math Toolkit,
1460
+ and is subject to the following license:
1461
+
1462
+ Boost Software License - Version 1.0 - August 17th, 2003
1463
+ . . . .
1464
+
1465
+ Permission is hereby granted, free of charge, to any person or
1466
+ organization obtaining a copy of the software and accompanying
1467
+ documentation covered by this license (the "Software") to use,
1468
+ reproduce, display, distribute, execute, and transmit the Software,
1469
+ and to prepare derivative works of the Software, and to permit
1470
+ third-parties to whom the Software is furnished to do so, all
1471
+ subject to the following:
1472
+
1473
+ The copyright notices in the Software and this entire statement,
1474
+ including the above license grant, this restriction and the following
1475
+ disclaimer, must be included in all copies of the Software, in whole
1476
+ or in part, and all derivative works of the Software, unless such
1477
+ copies or derivative works are solely in the form of machine-executable
1478
+ object code generated by a source language processor.
1479
+
1480
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1481
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1482
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
1483
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
1484
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
1485
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
1486
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
1487
+ OTHER DEALINGS IN THE SOFTWARE.
1488
+
1489
+ 17. Portions of the Nsight Eclipse Edition is subject to the
1490
+ following license:
1491
+
1492
+ The Eclipse Foundation makes available all content in this plug-in
1493
+ ("Content"). Unless otherwise indicated below, the Content is provided
1494
+ to you under the terms and conditions of the Eclipse Public License
1495
+ Version 1.0 ("EPL"). A copy of the EPL is available at http://
1496
+ www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program"
1497
+ will mean the Content.
1498
+
1499
+ If you did not receive this Content directly from the Eclipse
1500
+ Foundation, the Content is being redistributed by another party
1501
+ ("Redistributor") and different terms and conditions may apply to your
1502
+ use of any object code in the Content. Check the Redistributor's
1503
+ license that was provided with the Content. If no such license exists,
1504
+ contact the Redistributor. Unless otherwise indicated below, the terms
1505
+ and conditions of the EPL still apply to any source code in the
1506
+ Content and such source code may be obtained at http://www.eclipse.org.
1507
+
1508
+ 18. Some of the cuBLAS library routines uses code from
1509
+ OpenAI, which is subject to the following license:
1510
+
1511
+ License URL
1512
+ https://github.com/openai/openai-gemm/blob/master/LICENSE
1513
+
1514
+ License Text
1515
+ The MIT License
1516
+
1517
+ Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.
1518
+
1519
+ Permission is hereby granted, free of charge, to any person obtaining a copy
1520
+ of this software and associated documentation files (the "Software"), to deal
1521
+ in the Software without restriction, including without limitation the rights
1522
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
1523
+ copies of the Software, and to permit persons to whom the Software is
1524
+ furnished to do so, subject to the following conditions:
1525
+
1526
+ The above copyright notice and this permission notice shall be included in
1527
+ all copies or substantial portions of the Software.
1528
+
1529
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1530
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1531
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1532
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1533
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1534
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
1535
+ THE SOFTWARE.
1536
+
1537
+ 19. Licensee's use of the Visual Studio Setup Configuration
1538
+ Samples is subject to the following license:
1539
+
1540
+ The MIT License (MIT)
1541
+ Copyright (C) Microsoft Corporation. All rights reserved.
1542
+
1543
+ Permission is hereby granted, free of charge, to any person
1544
+ obtaining a copy of this software and associated documentation
1545
+ files (the "Software"), to deal in the Software without restriction,
1546
+ including without limitation the rights to use, copy, modify, merge,
1547
+ publish, distribute, sublicense, and/or sell copies of the Software,
1548
+ and to permit persons to whom the Software is furnished to do so,
1549
+ subject to the following conditions:
1550
+
1551
+ The above copyright notice and this permission notice shall be included
1552
+ in all copies or substantial portions of the Software.
1553
+
1554
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
1555
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1556
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1557
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1558
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1559
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1560
+
1561
+ 20. Licensee's use of linmath.h header for CPU functions for
1562
+ GL vector/matrix operations from lunarG is subject to the
1563
+ Apache License Version 2.0.
1564
+
1565
+ 21. The DX12-CUDA sample uses the d3dx12.h header, which is
1566
+ subject to the MIT license .
1567
+
1568
+ -----------------
wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/RECORD ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ nvidia/__pycache__/__init__.cpython-310.pyc,,
3
+ nvidia/cusolver/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ nvidia/cusolver/__pycache__/__init__.cpython-310.pyc,,
5
+ nvidia/cusolver/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ nvidia/cusolver/include/__pycache__/__init__.cpython-310.pyc,,
7
+ nvidia/cusolver/include/cusolverDn.h,sha256=8KUcqUxWPr8jpz3ZVpTB6I3IXMme1ok7E7vi9XXKRzk,147406
8
+ nvidia/cusolver/include/cusolverMg.h,sha256=N8989nnS2BleeMyuftbQgBDJ4sMAkLPSnmy_S_7fxng,11549
9
+ nvidia/cusolver/include/cusolverRf.h,sha256=7BZfWeuMJ8w1Pz4iZeGmwvDZbDNNq0ivG5MHtiATtls,14292
10
+ nvidia/cusolver/include/cusolverSp.h,sha256=8fev0XawDBd0xrOxUlQ3WhclKlUuVAT64zKxwnP8iT0,32561
11
+ nvidia/cusolver/include/cusolverSp_LOWLEVEL_PREVIEW.h,sha256=rTuS0rxwGV3bAz50ua59WVPQ9SvlijORj732oPejoCk,37495
12
+ nvidia/cusolver/include/cusolver_common.h,sha256=oyltrdGL5cpIPe3oJWxQ95XEprTPAohOG8XHBB84hRM,8824
13
+ nvidia/cusolver/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
14
+ nvidia/cusolver/lib/__pycache__/__init__.cpython-310.pyc,,
15
+ nvidia/cusolver/lib/libcusolver.so.11,sha256=6AWRIxTk0qxMYVazEbN11wRgK7_Mcz1OkxS6FGQ6bd4,234922936
16
+ nvidia/cusolver/lib/libcusolverMg.so.11,sha256=-fxKTTDSdUr_N679R85-NfpI0GDLO2IoTmUZm4utEeE,141988264
17
+ nvidia_cusolver_cu11-11.4.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
18
+ nvidia_cusolver_cu11-11.4.0.1.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
19
+ nvidia_cusolver_cu11-11.4.0.1.dist-info/METADATA,sha256=orEmzZBFkVhXyBgbnGKGbaI0ClyUFfTUhuuG_djbkqY,1551
20
+ nvidia_cusolver_cu11-11.4.0.1.dist-info/RECORD,,
21
+ nvidia_cusolver_cu11-11.4.0.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
22
+ nvidia_cusolver_cu11-11.4.0.1.dist-info/WHEEL,sha256=v6cGNql5q3Lw8M9MsG2Kk4-SoHxxNwGgZHlg0h0twcI,115
23
+ nvidia_cusolver_cu11-11.4.0.1.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/REQUESTED ADDED
File without changes
wemm/lib/python3.10/site-packages/nvidia_cusolver_cu11-11.4.0.1.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ nvidia
wemm/lib/python3.10/site-packages/torchmetrics/aggregation.py ADDED
@@ -0,0 +1,408 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import warnings
15
+ from typing import Any, Callable, List, Union
16
+
17
+ import torch
18
+ from torch import Tensor
19
+
20
+ from torchmetrics.metric import Metric
21
+ from torchmetrics.utilities.data import dim_zero_cat
22
+
23
+
24
class BaseAggregator(Metric):
    """Base class for aggregation metrics.

    Args:
        fn: string specifying the reduction function
        default_value: default tensor value to use for the metric state
        nan_strategy: options:
            - ``'error'``: if any `nan` values are encountered a RuntimeError is raised
            - ``'warn'``: if any `nan` values are encountered a warning is issued and they are removed
            - ``'ignore'``: all `nan` values are silently removed
            - a float: if a float is provided, any `nan` values are imputed with this value

        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Raises:
        ValueError:
            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float
    """

    value: Tensor
    is_differentiable = None
    higher_is_better = None
    full_state_update = False

    def __init__(
        self,
        fn: Union[Callable, str],
        default_value: Union[Tensor, List],
        nan_strategy: Union[str, float] = "error",
        **kwargs: Any,
    ):
        super().__init__(**kwargs)
        allowed_nan_strategy = ("error", "warn", "ignore")
        valid = isinstance(nan_strategy, float) or nan_strategy in allowed_nan_strategy
        if not valid:
            raise ValueError(
                f"Arg `nan_strategy` should either be a float or one of {allowed_nan_strategy}"
                f" but got {nan_strategy}."
            )

        self.nan_strategy = nan_strategy
        self.add_state("value", default=default_value, dist_reduce_fx=fn)

    def _cast_and_nan_check_input(self, x: Union[float, Tensor]) -> Tensor:
        """Coerce ``x`` to a float tensor and apply the configured ``nan_strategy``.

        Depending on the strategy, ``nan`` entries either raise, trigger a warning
        and get dropped, get dropped silently, or are replaced by the imputation value.
        """
        if not isinstance(x, Tensor):
            x = torch.as_tensor(x, dtype=torch.float32, device=self.device)

        nan_mask = torch.isnan(x)
        if nan_mask.any():
            strategy = self.nan_strategy
            if strategy == "error":
                raise RuntimeError("Encounted `nan` values in tensor")
            if strategy == "warn":
                warnings.warn("Encounted `nan` values in tensor. Will be removed.", UserWarning)
                x = x[~nan_mask]
            elif strategy == "ignore":
                x = x[~nan_mask]
            else:
                # float strategy: impute nans in place with the provided value
                x[nan_mask] = strategy

        return x.float()

    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
        """No-op placeholder; concrete aggregators override this."""

    def compute(self) -> Tensor:
        """Return the current aggregation state."""
        return self.value
93
+
94
+
95
class MaxMetric(BaseAggregator):
    """Aggregate a stream of value into their maximum value.

    As input to ``forward`` and ``update`` the metric accepts the following input

    - ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
      arbitrary shape ``(...,)``.

    As output of `forward` and `compute` the metric returns the following output

    - ``agg`` (:class:`~torch.Tensor`): scalar float tensor with aggregated maximum value over all inputs received

    Args:
        nan_strategy: options:
            - ``'error'``: if any `nan` values are encountered a RuntimeError is raised
            - ``'warn'``: if any `nan` values are encountered a warning is issued and they are removed
            - ``'ignore'``: all `nan` values are silently removed
            - a float: if a float is provided, any `nan` values are imputed with this value

        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Raises:
        ValueError:
            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float

    Example:
        >>> import torch
        >>> from torchmetrics import MaxMetric
        >>> metric = MaxMetric()
        >>> metric.update(1)
        >>> metric.update(torch.tensor([2, 3]))
        >>> metric.compute()
        tensor(3.)
    """

    full_state_update = True

    def __init__(
        self,
        nan_strategy: Union[str, float] = "warn",
        **kwargs: Any,
    ):
        # Running maximum starts at -inf so any real value replaces it.
        super().__init__("max", -torch.tensor(float("inf")), nan_strategy, **kwargs)

    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
        """Update state with data.

        Args:
            value: Either a float or tensor containing data. Additional tensor
                dimensions will be flattened
        """
        checked = self._cast_and_nan_check_input(value)
        if checked.numel():  # nothing to do for empty input
            self.value = torch.max(self.value, checked.max())
154
+
155
+
156
class MinMetric(BaseAggregator):
    """Aggregate a stream of value into their minimum value.

    As input to ``forward`` and ``update`` the metric accepts the following input

    - ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
      arbitrary shape ``(...,)``.

    As output of `forward` and `compute` the metric returns the following output

    - ``agg`` (:class:`~torch.Tensor`): scalar float tensor with aggregated minimum value over all inputs received

    Args:
        nan_strategy: options:
            - ``'error'``: if any `nan` values are encountered a RuntimeError is raised
            - ``'warn'``: if any `nan` values are encountered a warning is issued and they are removed
            - ``'ignore'``: all `nan` values are silently removed
            - a float: if a float is provided, any `nan` values are imputed with this value

        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Raises:
        ValueError:
            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float

    Example:
        >>> import torch
        >>> from torchmetrics import MinMetric
        >>> metric = MinMetric()
        >>> metric.update(1)
        >>> metric.update(torch.tensor([2, 3]))
        >>> metric.compute()
        tensor(1.)
    """

    full_state_update = True

    def __init__(
        self,
        nan_strategy: Union[str, float] = "warn",
        **kwargs: Any,
    ):
        # Running minimum starts at +inf so any real value replaces it.
        super().__init__("min", torch.tensor(float("inf")), nan_strategy, **kwargs)

    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
        """Update state with data.

        Args:
            value: Either a float or tensor containing data. Additional tensor
                dimensions will be flattened
        """
        checked = self._cast_and_nan_check_input(value)
        if checked.numel():  # nothing to do for empty input
            self.value = torch.min(self.value, checked.min())
215
+
216
+
217
class SumMetric(BaseAggregator):
    """Aggregate a stream of value into their sum.

    As input to ``forward`` and ``update`` the metric accepts the following input

    - ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
      arbitrary shape ``(...,)``.

    As output of `forward` and `compute` the metric returns the following output

    - ``agg`` (:class:`~torch.Tensor`): scalar float tensor with aggregated sum over all inputs received

    Args:
        nan_strategy: options:
            - ``'error'``: if any `nan` values are encountered a RuntimeError is raised
            - ``'warn'``: if any `nan` values are encountered a warning is issued and they are removed
            - ``'ignore'``: all `nan` values are silently removed
            - a float: if a float is provided, any `nan` values are imputed with this value

        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Raises:
        ValueError:
            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float

    Example:
        >>> import torch
        >>> from torchmetrics import SumMetric
        >>> metric = SumMetric()
        >>> metric.update(1)
        >>> metric.update(torch.tensor([2, 3]))
        >>> metric.compute()
        tensor(6.)
    """

    def __init__(
        self,
        nan_strategy: Union[str, float] = "warn",
        **kwargs: Any,
    ):
        # Sum state starts at 0 and accumulates across updates/processes.
        super().__init__("sum", torch.tensor(0.0), nan_strategy, **kwargs)

    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
        """Update state with data.

        Args:
            value: Either a float or tensor containing data. Additional tensor
                dimensions will be flattened
        """
        checked = self._cast_and_nan_check_input(value)
        if checked.numel():
            self.value += checked.sum()
274
+
275
+
276
class CatMetric(BaseAggregator):
    """Concatenate a stream of values.

    As input to ``forward`` and ``update`` the metric accepts the following input

    - ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
      arbitrary shape ``(...,)``.

    As output of `forward` and `compute` the metric returns the following output

    - ``agg`` (:class:`~torch.Tensor`): float tensor with the concatenation of all values received

    Args:
        nan_strategy: options:
            - ``'error'``: if any `nan` values are encountered a RuntimeError is raised
            - ``'warn'``: if any `nan` values are encountered a warning is issued and they are removed
            - ``'ignore'``: all `nan` values are silently removed
            - a float: if a float is provided, any `nan` values are imputed with this value

        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Raises:
        ValueError:
            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float

    Example:
        >>> import torch
        >>> from torchmetrics import CatMetric
        >>> metric = CatMetric()
        >>> metric.update(1)
        >>> metric.update(torch.tensor([2, 3]))
        >>> metric.compute()
        tensor([1., 2., 3.])
    """

    def __init__(
        self,
        nan_strategy: Union[str, float] = "warn",
        **kwargs: Any,
    ):
        # State is a list of tensors; concatenation happens lazily in ``compute``.
        super().__init__("cat", [], nan_strategy, **kwargs)

    def update(self, value: Union[float, Tensor]) -> None:  # type: ignore
        """Update state with data.

        Args:
            value: Either a float or tensor containing data. Additional tensor
                dimensions will be flattened
        """
        checked = self._cast_and_nan_check_input(value)
        if checked.numel():
            self.value.append(checked)

    def compute(self) -> Tensor:
        """Concatenate and return all accumulated values."""
        if isinstance(self.value, list) and self.value:
            return dim_zero_cat(self.value)
        # Either already reduced to a tensor, or still the empty default list.
        return self.value
334
+
335
+
336
class MeanMetric(BaseAggregator):
    """Aggregate a stream of value into their mean value.

    As input to ``forward`` and ``update`` the metric accepts the following input

    - ``value`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
      arbitrary shape ``(...,)``.
    - ``weight`` (:class:`~float` or :class:`~torch.Tensor`): a single float or a tensor of float values with
      arbitrary shape ``(...,)``. Needs to be broadcastable with the shape of the ``value`` tensor.

    As output of `forward` and `compute` the metric returns the following output

    - ``agg`` (:class:`~torch.Tensor`): scalar float tensor with aggregated (weighted) mean over all inputs received

    Args:
        nan_strategy: options:
            - ``'error'``: if any `nan` values are encountered a RuntimeError is raised
            - ``'warn'``: if any `nan` values are encountered a warning is issued and they are removed
            - ``'ignore'``: all `nan` values are silently removed
            - a float: if a float is provided, any `nan` values are imputed with this value

        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Raises:
        ValueError:
            If ``nan_strategy`` is not one of ``error``, ``warn``, ``ignore`` or a float

    Example:
        >>> import torch
        >>> from torchmetrics import MeanMetric
        >>> metric = MeanMetric()
        >>> metric.update(1)
        >>> metric.update(torch.tensor([2, 3]))
        >>> metric.compute()
        tensor(2.)
    """

    def __init__(
        self,
        nan_strategy: Union[str, float] = "warn",
        **kwargs: Any,
    ):
        # Weighted mean is tracked as two sums: sum(value * weight) and sum(weight).
        super().__init__("sum", torch.tensor(0.0), nan_strategy, **kwargs)
        self.add_state("weight", default=torch.tensor(0.0), dist_reduce_fx="sum")

    def update(self, value: Union[float, Tensor], weight: Union[float, Tensor] = 1.0) -> None:  # type: ignore
        """Update state with data.

        Args:
            value: Either a float or tensor containing data. Additional tensor
                dimensions will be flattened
            weight: Either a float or tensor containing weights for calculating
                the average. Shape of weight should be able to broadcast with
                the shape of `value`. Default to `1.0` corresponding to simple
                harmonic average.
        """
        checked_value = self._cast_and_nan_check_input(value)
        checked_weight = self._cast_and_nan_check_input(weight)

        if checked_value.numel() == 0:
            return
        # Broadcast the weights to the value shape before accumulating.
        checked_weight = torch.broadcast_to(checked_weight, checked_value.shape)
        self.value += (checked_value * checked_weight).sum()
        self.weight += checked_weight.sum()

    def compute(self) -> Tensor:
        """Return the weighted mean of all values seen so far."""
        return self.value / self.weight
wemm/lib/python3.10/site-packages/torchmetrics/classification/__init__.py ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from torchmetrics.classification.confusion_matrix import ( # isort:skip
15
+ BinaryConfusionMatrix,
16
+ ConfusionMatrix,
17
+ MulticlassConfusionMatrix,
18
+ MultilabelConfusionMatrix,
19
+ )
20
+ from torchmetrics.classification.precision_recall_curve import ( # isort:skip
21
+ PrecisionRecallCurve,
22
+ BinaryPrecisionRecallCurve,
23
+ MulticlassPrecisionRecallCurve,
24
+ MultilabelPrecisionRecallCurve,
25
+ )
26
+ from torchmetrics.classification.stat_scores import ( # isort:skip
27
+ BinaryStatScores,
28
+ MulticlassStatScores,
29
+ MultilabelStatScores,
30
+ StatScores,
31
+ )
32
+ from torchmetrics.classification.accuracy import Accuracy, BinaryAccuracy, MulticlassAccuracy, MultilabelAccuracy
33
+ from torchmetrics.classification.auroc import AUROC, BinaryAUROC, MulticlassAUROC, MultilabelAUROC
34
+ from torchmetrics.classification.average_precision import (
35
+ AveragePrecision,
36
+ BinaryAveragePrecision,
37
+ MulticlassAveragePrecision,
38
+ MultilabelAveragePrecision,
39
+ )
40
+ from torchmetrics.classification.calibration_error import (
41
+ BinaryCalibrationError,
42
+ CalibrationError,
43
+ MulticlassCalibrationError,
44
+ )
45
+ from torchmetrics.classification.cohen_kappa import BinaryCohenKappa, CohenKappa, MulticlassCohenKappa
46
+ from torchmetrics.classification.dice import Dice
47
+ from torchmetrics.classification.exact_match import ExactMatch, MulticlassExactMatch, MultilabelExactMatch
48
+ from torchmetrics.classification.f_beta import (
49
+ BinaryF1Score,
50
+ BinaryFBetaScore,
51
+ F1Score,
52
+ FBetaScore,
53
+ MulticlassF1Score,
54
+ MulticlassFBetaScore,
55
+ MultilabelF1Score,
56
+ MultilabelFBetaScore,
57
+ )
58
+ from torchmetrics.classification.hamming import (
59
+ BinaryHammingDistance,
60
+ HammingDistance,
61
+ MulticlassHammingDistance,
62
+ MultilabelHammingDistance,
63
+ )
64
+ from torchmetrics.classification.hinge import BinaryHingeLoss, HingeLoss, MulticlassHingeLoss
65
+ from torchmetrics.classification.jaccard import (
66
+ BinaryJaccardIndex,
67
+ JaccardIndex,
68
+ MulticlassJaccardIndex,
69
+ MultilabelJaccardIndex,
70
+ )
71
+ from torchmetrics.classification.matthews_corrcoef import (
72
+ BinaryMatthewsCorrCoef,
73
+ MatthewsCorrCoef,
74
+ MulticlassMatthewsCorrCoef,
75
+ MultilabelMatthewsCorrCoef,
76
+ )
77
+ from torchmetrics.classification.precision_recall import (
78
+ BinaryPrecision,
79
+ BinaryRecall,
80
+ MulticlassPrecision,
81
+ MulticlassRecall,
82
+ MultilabelPrecision,
83
+ MultilabelRecall,
84
+ Precision,
85
+ Recall,
86
+ )
87
+ from torchmetrics.classification.ranking import (
88
+ MultilabelCoverageError,
89
+ MultilabelRankingAveragePrecision,
90
+ MultilabelRankingLoss,
91
+ )
92
+ from torchmetrics.classification.recall_at_fixed_precision import (
93
+ BinaryRecallAtFixedPrecision,
94
+ MulticlassRecallAtFixedPrecision,
95
+ MultilabelRecallAtFixedPrecision,
96
+ )
97
+ from torchmetrics.classification.roc import ROC, BinaryROC, MulticlassROC, MultilabelROC
98
+ from torchmetrics.classification.specificity import (
99
+ BinarySpecificity,
100
+ MulticlassSpecificity,
101
+ MultilabelSpecificity,
102
+ Specificity,
103
+ )
104
+
105
+ __all__ = [
106
+ "BinaryConfusionMatrix",
107
+ "ConfusionMatrix",
108
+ "MulticlassConfusionMatrix",
109
+ "MultilabelConfusionMatrix",
110
+ "PrecisionRecallCurve",
111
+ "BinaryPrecisionRecallCurve",
112
+ "MulticlassPrecisionRecallCurve",
113
+ "MultilabelPrecisionRecallCurve",
114
+ "BinaryStatScores",
115
+ "MulticlassStatScores",
116
+ "MultilabelStatScores",
117
+ "StatScores",
118
+ "Accuracy",
119
+ "BinaryAccuracy",
120
+ "MulticlassAccuracy",
121
+ "MultilabelAccuracy",
122
+ "AUROC",
123
+ "BinaryAUROC",
124
+ "MulticlassAUROC",
125
+ "MultilabelAUROC",
126
+ "AveragePrecision",
127
+ "BinaryAveragePrecision",
128
+ "MulticlassAveragePrecision",
129
+ "MultilabelAveragePrecision",
130
+ "BinnedAveragePrecision",
131
+ "BinnedPrecisionRecallCurve",
132
+ "BinnedRecallAtFixedPrecision",
133
+ "BinaryCalibrationError",
134
+ "CalibrationError",
135
+ "MulticlassCalibrationError",
136
+ "BinaryCohenKappa",
137
+ "CohenKappa",
138
+ "MulticlassCohenKappa",
139
+ "Dice",
140
+ "ExactMatch",
141
+ "MulticlassExactMatch",
142
+ "MultilabelExactMatch",
143
+ "BinaryF1Score",
144
+ "BinaryFBetaScore",
145
+ "F1Score",
146
+ "FBetaScore",
147
+ "MulticlassF1Score",
148
+ "MulticlassFBetaScore",
149
+ "MultilabelF1Score",
150
+ "MultilabelFBetaScore",
151
+ "BinaryHammingDistance",
152
+ "HammingDistance",
153
+ "MulticlassHammingDistance",
154
+ "MultilabelHammingDistance",
155
+ "BinaryHingeLoss",
156
+ "HingeLoss",
157
+ "MulticlassHingeLoss",
158
+ "BinaryJaccardIndex",
159
+ "JaccardIndex",
160
+ "MulticlassJaccardIndex",
161
+ "MultilabelJaccardIndex",
162
+ "BinaryMatthewsCorrCoef",
163
+ "MatthewsCorrCoef",
164
+ "MulticlassMatthewsCorrCoef",
165
+ "MultilabelMatthewsCorrCoef",
166
+ "BinaryPrecision",
167
+ "BinaryRecall",
168
+ "MulticlassPrecision",
169
+ "MulticlassRecall",
170
+ "MultilabelPrecision",
171
+ "MultilabelRecall",
172
+ "Precision",
173
+ "Recall",
174
+ "CoverageError",
175
+ "LabelRankingAveragePrecision",
176
+ "LabelRankingLoss",
177
+ "MultilabelCoverageError",
178
+ "MultilabelRankingAveragePrecision",
179
+ "MultilabelRankingLoss",
180
+ "BinaryRecallAtFixedPrecision",
181
+ "MulticlassRecallAtFixedPrecision",
182
+ "MultilabelRecallAtFixedPrecision",
183
+ "ROC",
184
+ "BinaryROC",
185
+ "MulticlassROC",
186
+ "MultilabelROC",
187
+ "BinarySpecificity",
188
+ "MulticlassSpecificity",
189
+ "MultilabelSpecificity",
190
+ "Specificity",
191
+ ]
wemm/lib/python3.10/site-packages/torchmetrics/classification/auroc.py ADDED
@@ -0,0 +1,372 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, List, Optional, Union
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.classification.precision_recall_curve import (
21
+ BinaryPrecisionRecallCurve,
22
+ MulticlassPrecisionRecallCurve,
23
+ MultilabelPrecisionRecallCurve,
24
+ )
25
+ from torchmetrics.functional.classification.auroc import (
26
+ _binary_auroc_arg_validation,
27
+ _binary_auroc_compute,
28
+ _multiclass_auroc_arg_validation,
29
+ _multiclass_auroc_compute,
30
+ _multilabel_auroc_arg_validation,
31
+ _multilabel_auroc_compute,
32
+ )
33
+ from torchmetrics.metric import Metric
34
+ from torchmetrics.utilities.data import dim_zero_cat
35
+
36
+
37
+ class BinaryAUROC(BinaryPrecisionRecallCurve):
38
+ r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for binary tasks. The AUROC
39
+ score summarizes the ROC curve into an single number that describes the performance of a model for multiple
40
+ thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
41
+ corresponds to random guessing.
42
+
43
+ As input to ``forward`` and ``update`` the metric accepts the following input:
44
+
45
+ - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)`` containing probabilities or logits for
46
+ each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
47
+ sigmoid per element.
48
+ - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and
49
+ therefore only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the
50
+ positive class.
51
+
52
+ As output to ``forward`` and ``compute`` the metric returns the following output:
53
+
54
+ - ``b_auroc`` (:class:`~torch.Tensor`): A single scalar with the auroc score.
55
+
56
+ Additional dimension ``...`` will be flattened into the batch dimension.
57
+
58
+ The implementation both supports calculating the metric in a non-binned but accurate version and a
59
+ binned version that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will
60
+ activate the non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the
61
+ `thresholds` argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
62
+ size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
63
+
64
+ Args:
65
+ max_fpr: If not ``None``, calculates standardized partial AUC over the range ``[0, max_fpr]``.
66
+ thresholds:
67
+ Can be one of:
68
+
69
+ - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
70
+ all the data. Most accurate but also most memory consuming approach.
71
+ - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
72
+ 0 to 1 as bins for the calculation.
73
+ - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
74
+ - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
75
+ bins for the calculation.
76
+
77
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
78
+ Set to ``False`` for faster computations.
79
+ kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
80
+
81
+ Example:
82
+ >>> from torchmetrics.classification import BinaryAUROC
83
+ >>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
84
+ >>> target = torch.tensor([0, 1, 1, 0])
85
+ >>> metric = BinaryAUROC(thresholds=None)
86
+ >>> metric(preds, target)
87
+ tensor(0.5000)
88
+ >>> b_auroc = BinaryAUROC(thresholds=5)
89
+ >>> b_auroc(preds, target)
90
+ tensor(0.5000)
91
+ """
92
+ is_differentiable: bool = False
93
+ higher_is_better: Optional[bool] = None
94
+ full_state_update: bool = False
95
+
96
+ def __init__(
97
+ self,
98
+ max_fpr: Optional[float] = None,
99
+ thresholds: Optional[Union[int, List[float], Tensor]] = None,
100
+ ignore_index: Optional[int] = None,
101
+ validate_args: bool = True,
102
+ **kwargs: Any,
103
+ ) -> None:
104
+ super().__init__(thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs)
105
+ if validate_args:
106
+ _binary_auroc_arg_validation(max_fpr, thresholds, ignore_index)
107
+ self.max_fpr = max_fpr
108
+
109
+ def compute(self) -> Tensor:
110
+ if self.thresholds is None:
111
+ state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)]
112
+ else:
113
+ state = self.confmat
114
+ return _binary_auroc_compute(state, self.thresholds, self.max_fpr)
115
+
116
+
117
+ class MulticlassAUROC(MulticlassPrecisionRecallCurve):
118
+ r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for multiclass tasks. The AUROC
119
+ score summarizes the ROC curve into an single number that describes the performance of a model for multiple
120
+ thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
121
+ corresponds to random guessing.
122
+
123
+ As input to ``forward`` and ``update`` the metric accepts the following input:
124
+
125
+ - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)`` containing probabilities or logits
126
+ for each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto
127
+ apply softmax per sample.
128
+ - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and
129
+ therefore only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
130
+
131
+ As output to ``forward`` and ``compute`` the metric returns the following output:
132
+
133
+ - ``mc_auroc`` (:class:`~torch.Tensor`): If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will
134
+ be returned with auroc score per class. If `average="macro"|"weighted"` then a single scalar is returned.
135
+
136
+ Additional dimension ``...`` will be flattened into the batch dimension.
137
+
138
+ The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
139
+ that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
140
+ non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
141
+ argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
142
+ size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
143
+
144
+ Args:
145
+ num_classes: Integer specifing the number of classes
146
+ average:
147
+ Defines the reduction that is applied over classes. Should be one of the following:
148
+
149
+ - ``macro``: Calculate score for each class and average them
150
+ - ``weighted``: Calculates score for each class and computes weighted average using their support
151
+ - ``"none"`` or ``None``: Calculates score for each class and applies no reduction
152
+
153
+ thresholds:
154
+ Can be one of:
155
+
156
+ - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
157
+ all the data. Most accurate but also most memory consuming approach.
158
+ - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
159
+ 0 to 1 as bins for the calculation.
160
+ - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
161
+ - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
162
+ bins for the calculation.
163
+
164
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
165
+ Set to ``False`` for faster computations.
166
+ kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
167
+
168
+ Example:
169
+ >>> from torchmetrics.classification import MulticlassAUROC
170
+ >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
171
+ ... [0.05, 0.75, 0.05, 0.05, 0.05],
172
+ ... [0.05, 0.05, 0.75, 0.05, 0.05],
173
+ ... [0.05, 0.05, 0.05, 0.75, 0.05]])
174
+ >>> target = torch.tensor([0, 1, 3, 2])
175
+ >>> metric = MulticlassAUROC(num_classes=5, average="macro", thresholds=None)
176
+ >>> metric(preds, target)
177
+ tensor(0.5333)
178
+ >>> mc_auroc = MulticlassAUROC(num_classes=5, average=None, thresholds=None)
179
+ >>> mc_auroc(preds, target)
180
+ tensor([1.0000, 1.0000, 0.3333, 0.3333, 0.0000])
181
+ >>> mc_auroc = MulticlassAUROC(num_classes=5, average="macro", thresholds=5)
182
+ >>> mc_auroc(preds, target)
183
+ tensor(0.5333)
184
+ >>> mc_auroc = MulticlassAUROC(num_classes=5, average=None, thresholds=5)
185
+ >>> mc_auroc(preds, target)
186
+ tensor([1.0000, 1.0000, 0.3333, 0.3333, 0.0000])
187
+ """
188
+
189
+ is_differentiable: bool = False
190
+ higher_is_better: Optional[bool] = None
191
+ full_state_update: bool = False
192
+
193
+ def __init__(
194
+ self,
195
+ num_classes: int,
196
+ average: Optional[Literal["macro", "weighted", "none"]] = "macro",
197
+ thresholds: Optional[Union[int, List[float], Tensor]] = None,
198
+ ignore_index: Optional[int] = None,
199
+ validate_args: bool = True,
200
+ **kwargs: Any,
201
+ ) -> None:
202
+ super().__init__(
203
+ num_classes=num_classes, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs
204
+ )
205
+ if validate_args:
206
+ _multiclass_auroc_arg_validation(num_classes, average, thresholds, ignore_index)
207
+ self.average = average
208
+ self.validate_args = validate_args
209
+
210
+ def compute(self) -> Tensor:
211
+ if self.thresholds is None:
212
+ state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)]
213
+ else:
214
+ state = self.confmat
215
+ return _multiclass_auroc_compute(state, self.num_classes, self.average, self.thresholds)
216
+
217
+
218
+ class MultilabelAUROC(MultilabelPrecisionRecallCurve):
219
+ r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for multilabel tasks. The AUROC
220
+ score summarizes the ROC curve into an single number that describes the performance of a model for multiple
221
+ thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
222
+ corresponds to random guessing.
223
+
224
+ As input to ``forward`` and ``update`` the metric accepts the following input:
225
+
226
+ - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)`` containing probabilities or logits
227
+ for each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto
228
+ apply sigmoid per element.
229
+ - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)`` containing ground truth labels, and
230
+ therefore only contain {0,1} values (except if `ignore_index` is specified).
231
+
232
+ As output to ``forward`` and ``compute`` the metric returns the following output:
233
+
234
+ - ``ml_auroc`` (:class:`~torch.Tensor`): If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will
235
+ be returned with auroc score per class. If `average="micro|macro"|"weighted"` then a single scalar is returned.
236
+
237
+ Additional dimension ``...`` will be flattened into the batch dimension.
238
+
239
+ The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
240
+ that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
241
+ non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
242
+ argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
243
+ size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).
244
+
245
+ Args:
246
+ num_labels: Integer specifing the number of labels
247
+ average:
248
+ Defines the reduction that is applied over labels. Should be one of the following:
249
+
250
+ - ``micro``: Sum score over all labels
251
+ - ``macro``: Calculate score for each label and average them
252
+ - ``weighted``: Calculates score for each label and computes weighted average using their support
253
+ - ``"none"`` or ``None``: Calculates score for each label and applies no reduction
254
+ thresholds:
255
+ Can be one of:
256
+
257
+ - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
258
+ all the data. Most accurate but also most memory consuming approach.
259
+ - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
260
+ 0 to 1 as bins for the calculation.
261
+ - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
262
+ - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
263
+ bins for the calculation.
264
+
265
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
266
+ Set to ``False`` for faster computations.
267
+ kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
268
+
269
+ Example:
270
+ >>> from torchmetrics.classification import MultilabelAUROC
271
+ >>> preds = torch.tensor([[0.75, 0.05, 0.35],
272
+ ... [0.45, 0.75, 0.05],
273
+ ... [0.05, 0.55, 0.75],
274
+ ... [0.05, 0.65, 0.05]])
275
+ >>> target = torch.tensor([[1, 0, 1],
276
+ ... [0, 0, 0],
277
+ ... [0, 1, 1],
278
+ ... [1, 1, 1]])
279
+ >>> ml_auroc = MultilabelAUROC(num_labels=3, average="macro", thresholds=None)
280
+ >>> ml_auroc(preds, target)
281
+ tensor(0.6528)
282
+ >>> ml_auroc = MultilabelAUROC(num_labels=3, average=None, thresholds=None)
283
+ >>> ml_auroc(preds, target)
284
+ tensor([0.6250, 0.5000, 0.8333])
285
+ >>> ml_auroc = MultilabelAUROC(num_labels=3, average="macro", thresholds=5)
286
+ >>> ml_auroc(preds, target)
287
+ tensor(0.6528)
288
+ >>> ml_auroc = MultilabelAUROC(num_labels=3, average=None, thresholds=5)
289
+ >>> ml_auroc(preds, target)
290
+ tensor([0.6250, 0.5000, 0.8333])
291
+ """
292
+ is_differentiable: bool = False
293
+ higher_is_better: Optional[bool] = None
294
+ full_state_update: bool = False
295
+
296
+ def __init__(
297
+ self,
298
+ num_labels: int,
299
+ average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
300
+ thresholds: Optional[Union[int, List[float], Tensor]] = None,
301
+ ignore_index: Optional[int] = None,
302
+ validate_args: bool = True,
303
+ **kwargs: Any,
304
+ ) -> None:
305
+ super().__init__(
306
+ num_labels=num_labels, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs
307
+ )
308
+ if validate_args:
309
+ _multilabel_auroc_arg_validation(num_labels, average, thresholds, ignore_index)
310
+ self.average = average
311
+ self.validate_args = validate_args
312
+
313
+ def compute(self) -> Tensor:
314
+ if self.thresholds is None:
315
+ state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)]
316
+ else:
317
+ state = self.confmat
318
+ return _multilabel_auroc_compute(state, self.num_labels, self.average, self.thresholds, self.ignore_index)
319
+
320
+
321
+ class AUROC:
322
+ r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_). The AUROC score summarizes the
323
+ ROC curve into an single number that describes the performance of a model for multiple thresholds at the same
324
+ time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5 corresponds to random guessing.
325
+
326
+ This module is a simple wrapper to get the task specific versions of this metric, which is done by setting the
327
+ ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
328
+ :mod:`BinaryAUROC`, :mod:`MulticlassAUROC` and :mod:`MultilabelAUROC` for the specific details of
329
+ each argument influence and examples.
330
+
331
+ Legacy Example:
332
+ >>> preds = torch.tensor([0.13, 0.26, 0.08, 0.19, 0.34])
333
+ >>> target = torch.tensor([0, 0, 1, 1, 1])
334
+ >>> auroc = AUROC(task="binary")
335
+ >>> auroc(preds, target)
336
+ tensor(0.5000)
337
+
338
+ >>> preds = torch.tensor([[0.90, 0.05, 0.05],
339
+ ... [0.05, 0.90, 0.05],
340
+ ... [0.05, 0.05, 0.90],
341
+ ... [0.85, 0.05, 0.10],
342
+ ... [0.10, 0.10, 0.80]])
343
+ >>> target = torch.tensor([0, 1, 1, 2, 2])
344
+ >>> auroc = AUROC(task="multiclass", num_classes=3)
345
+ >>> auroc(preds, target)
346
+ tensor(0.7778)
347
+ """
348
+
349
+ def __new__(
350
+ cls,
351
+ task: Literal["binary", "multiclass", "multilabel"],
352
+ thresholds: Optional[Union[int, List[float], Tensor]] = None,
353
+ num_classes: Optional[int] = None,
354
+ num_labels: Optional[int] = None,
355
+ average: Optional[Literal["macro", "weighted", "none"]] = "macro",
356
+ max_fpr: Optional[float] = None,
357
+ ignore_index: Optional[int] = None,
358
+ validate_args: bool = True,
359
+ **kwargs: Any,
360
+ ) -> Metric:
361
+ kwargs.update(dict(thresholds=thresholds, ignore_index=ignore_index, validate_args=validate_args))
362
+ if task == "binary":
363
+ return BinaryAUROC(max_fpr, **kwargs)
364
+ if task == "multiclass":
365
+ assert isinstance(num_classes, int)
366
+ return MulticlassAUROC(num_classes, average, **kwargs)
367
+ if task == "multilabel":
368
+ assert isinstance(num_labels, int)
369
+ return MultilabelAUROC(num_labels, average, **kwargs)
370
+ raise ValueError(
371
+ f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
372
+ )
wemm/lib/python3.10/site-packages/torchmetrics/classification/average_precision.py ADDED
@@ -0,0 +1,376 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, List, Optional, Union
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.classification.precision_recall_curve import (
21
+ BinaryPrecisionRecallCurve,
22
+ MulticlassPrecisionRecallCurve,
23
+ MultilabelPrecisionRecallCurve,
24
+ )
25
+ from torchmetrics.functional.classification.average_precision import (
26
+ _binary_average_precision_compute,
27
+ _multiclass_average_precision_arg_validation,
28
+ _multiclass_average_precision_compute,
29
+ _multilabel_average_precision_arg_validation,
30
+ _multilabel_average_precision_compute,
31
+ )
32
+ from torchmetrics.metric import Metric
33
+ from torchmetrics.utilities.data import dim_zero_cat
34
+
35
+
36
+ class BinaryAveragePrecision(BinaryPrecisionRecallCurve):
37
+ r"""Computes the average precision (AP) score for binary tasks. The AP score summarizes a precision-recall curve
38
+ as an weighted mean of precisions at each threshold, with the difference in recall from the previous threshold
39
+ as weight:
40
+
41
+ .. math::
42
+ AP = \sum_{n} (R_n - R_{n-1}) P_n
43
+
44
+ where :math:`P_n, R_n` is the respective precision and recall at threshold index :math:`n`. This value is
45
+ equivalent to the area under the precision-recall curve (AUPRC).
46
+
47
+ As input to ``forward`` and ``update`` the metric accepts the following input:
48
+
49
+ - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)`` containing probabilities or logits for
50
+ each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
51
+ sigmoid per element.
52
+ - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and
53
+ therefore only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the
54
+ positive class.
55
+
56
+ As output to ``forward`` and ``compute`` the metric returns the following output:
57
+
58
+ - ``bap`` (:class:`~torch.Tensor`): A single scalar with the average precision score
59
+
60
+ Additional dimension ``...`` will be flattened into the batch dimension.
61
+
62
+ The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
63
+ that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
64
+ non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
65
+ argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
66
+ size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
67
+
68
+ Args:
69
+ thresholds:
70
+ Can be one of:
71
+
72
+ - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
73
+ all the data. Most accurate but also most memory consuming approach.
74
+ - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
75
+ 0 to 1 as bins for the calculation.
76
+ - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
77
+ - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
78
+ bins for the calculation.
79
+
80
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
81
+ Set to ``False`` for faster computations.
82
+ kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
83
+
84
+ Example:
85
+ >>> from torchmetrics.classification import BinaryAveragePrecision
86
+ >>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
87
+ >>> target = torch.tensor([0, 1, 1, 0])
88
+ >>> metric = BinaryAveragePrecision(thresholds=None)
89
+ >>> metric(preds, target)
90
+ tensor(0.5833)
91
+ >>> bap = BinaryAveragePrecision(thresholds=5)
92
+ >>> bap(preds, target)
93
+ tensor(0.6667)
94
+ """
95
+ is_differentiable: bool = False
96
+ higher_is_better: Optional[bool] = None
97
+ full_state_update: bool = False
98
+
99
+ def compute(self) -> Tensor:
100
+ if self.thresholds is None:
101
+ state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)]
102
+ else:
103
+ state = self.confmat
104
+ return _binary_average_precision_compute(state, self.thresholds)
105
+
106
+
107
class MulticlassAveragePrecision(MulticlassPrecisionRecallCurve):
    r"""Computes the average precision (AP) score for multiclass tasks. The AP score summarizes a precision-recall
    curve as a weighted mean of precisions at each threshold, with the difference in recall from the previous
    threshold as weight:

    .. math::
        AP = \sum_{n} (R_n - R_{n-1}) P_n

    where :math:`P_n, R_n` is the respective precision and recall at threshold index :math:`n`. This value is
    equivalent to the area under the precision-recall curve (AUPRC).

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)`` containing probabilities or logits
      for each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto
      apply softmax per sample.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and
      therefore only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mcap`` (:class:`~torch.Tensor`): If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will be
      returned with AP score per class. If `average="macro"|"weighted"` then a single scalar is returned.

    Additional dimension ``...`` will be flattened into the batch dimension.

    The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
    that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
    non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
    argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
    size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).

    Args:
        num_classes: Integer specifing the number of classes
        average:
            Defines the reduction that is applied over classes. Should be one of the following:

            - ``macro``: Calculate score for each class and average them
            - ``weighted``: Calculates score for each class and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates score for each class and applies no reduction
        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
              all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
              0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
              bins for the calculation.

        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.
        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Example:
        >>> from torchmetrics.classification import MulticlassAveragePrecision
        >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
        ...                       [0.05, 0.75, 0.05, 0.05, 0.05],
        ...                       [0.05, 0.05, 0.75, 0.05, 0.05],
        ...                       [0.05, 0.05, 0.05, 0.75, 0.05]])
        >>> target = torch.tensor([0, 1, 3, 2])
        >>> metric = MulticlassAveragePrecision(num_classes=5, average="macro", thresholds=None)
        >>> metric(preds, target)
        tensor(0.6250)
        >>> mcap = MulticlassAveragePrecision(num_classes=5, average=None, thresholds=None)
        >>> mcap(preds, target)
        tensor([1.0000, 1.0000, 0.2500, 0.2500,    nan])
        >>> mcap = MulticlassAveragePrecision(num_classes=5, average="macro", thresholds=5)
        >>> mcap(preds, target)
        tensor(0.5000)
        >>> mcap = MulticlassAveragePrecision(num_classes=5, average=None, thresholds=5)
        >>> mcap(preds, target)
        tensor([1.0000, 1.0000, 0.2500, 0.2500, -0.0000])
    """

    is_differentiable: bool = False
    higher_is_better: Optional[bool] = None
    full_state_update: bool = False

    def __init__(
        self,
        num_classes: int,
        average: Optional[Literal["macro", "weighted", "none"]] = "macro",
        thresholds: Optional[Union[int, List[float], Tensor]] = None,
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> None:
        # Parent validation is disabled because this class validates the full
        # argument set (including ``average``) itself below.
        super().__init__(
            num_classes=num_classes, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs
        )
        if validate_args:
            _multiclass_average_precision_arg_validation(num_classes, average, thresholds, ignore_index)
        self.average = average
        self.validate_args = validate_args

    def compute(self) -> Tensor:
        """Compute the multiclass average precision from the accumulated state."""
        # Non-binned mode stores raw preds/targets; binned mode stores a confusion matrix.
        if self.thresholds is None:
            state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)]
        else:
            state = self.confmat
        return _multiclass_average_precision_compute(state, self.num_classes, self.average, self.thresholds)
210
+
211
+
212
class MultilabelAveragePrecision(MultilabelPrecisionRecallCurve):
    r"""Computes the average precision (AP) score for multilabel tasks. The AP score summarizes a precision-recall
    curve as a weighted mean of precisions at each threshold, with the difference in recall from the previous
    threshold as weight:

    .. math::
        AP = \sum_{n} (R_n - R_{n-1}) P_n

    where :math:`P_n, R_n` is the respective precision and recall at threshold index :math:`n`. This value is
    equivalent to the area under the precision-recall curve (AUPRC).

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)`` containing probabilities or logits
      for each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto
      apply sigmoid per element.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)`` containing ground truth labels, and
      therefore only contain {0,1} values (except if `ignore_index` is specified).

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mlap`` (:class:`~torch.Tensor`): If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will be
      returned with AP score per class. If `average="micro|macro"|"weighted"` then a single scalar is returned.

    Additional dimension ``...`` will be flattened into the batch dimension.

    The implementation both supports calculating the metric in a non-binned but accurate version and a binned
    version that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate
    the non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the
    `thresholds` argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
    size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).

    Args:
        num_labels: Integer specifing the number of labels
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum score over all labels
            - ``macro``: Calculate score for each label and average them
            - ``weighted``: Calculates score for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates score for each label and applies no reduction
        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
              all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
              0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
              bins for the calculation.

        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.
        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Example:
        >>> from torchmetrics.classification import MultilabelAveragePrecision
        >>> preds = torch.tensor([[0.75, 0.05, 0.35],
        ...                       [0.45, 0.75, 0.05],
        ...                       [0.05, 0.55, 0.75],
        ...                       [0.05, 0.65, 0.05]])
        >>> target = torch.tensor([[1, 0, 1],
        ...                        [0, 0, 0],
        ...                        [0, 1, 1],
        ...                        [1, 1, 1]])
        >>> metric = MultilabelAveragePrecision(num_labels=3, average="macro", thresholds=None)
        >>> metric(preds, target)
        tensor(0.7500)
        >>> mlap = MultilabelAveragePrecision(num_labels=3, average=None, thresholds=None)
        >>> mlap(preds, target)
        tensor([0.7500, 0.5833, 0.9167])
        >>> mlap = MultilabelAveragePrecision(num_labels=3, average="macro", thresholds=5)
        >>> mlap(preds, target)
        tensor(0.7778)
        >>> mlap = MultilabelAveragePrecision(num_labels=3, average=None, thresholds=5)
        >>> mlap(preds, target)
        tensor([0.7500, 0.6667, 0.9167])
    """

    is_differentiable: bool = False
    higher_is_better: Optional[bool] = None
    full_state_update: bool = False

    def __init__(
        self,
        num_labels: int,
        average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
        thresholds: Optional[Union[int, List[float], Tensor]] = None,
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> None:
        # Parent validation is disabled because this class validates the full
        # argument set (including ``average``) itself below.
        super().__init__(
            num_labels=num_labels, thresholds=thresholds, ignore_index=ignore_index, validate_args=False, **kwargs
        )
        if validate_args:
            _multilabel_average_precision_arg_validation(num_labels, average, thresholds, ignore_index)
        self.average = average
        self.validate_args = validate_args

    def compute(self) -> Tensor:
        """Compute the multilabel average precision from the accumulated state."""
        # Non-binned mode stores raw preds/targets; binned mode stores a confusion matrix.
        if self.thresholds is None:
            state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)]
        else:
            state = self.confmat
        return _multilabel_average_precision_compute(
            state, self.num_labels, self.average, self.thresholds, self.ignore_index
        )
320
+
321
+
322
class AveragePrecision:
    r"""Computes the average precision (AP) score. The AP score summarizes a precision-recall curve as a weighted
    mean of precisions at each threshold, with the difference in recall from the previous threshold as weight:

    .. math::
        AP = \sum_{n} (R_n - R_{n-1}) P_n

    where :math:`P_n, R_n` is the respective precision and recall at threshold index :math:`n`. This value is
    equivalent to the area under the precision-recall curve (AUPRC).

    This class is a thin factory: depending on the ``task`` argument
    (``'binary'``, ``'multiclass'`` or ``'multilabel'``) it constructs and returns an instance of
    :mod:`BinaryAveragePrecision`, :mod:`MulticlassAveragePrecision` or :mod:`MultilabelAveragePrecision`.
    See those classes for details on each argument and further examples.

    Legacy Example:
        >>> pred = torch.tensor([0, 0.1, 0.8, 0.4])
        >>> target = torch.tensor([0, 1, 1, 1])
        >>> average_precision = AveragePrecision(task="binary")
        >>> average_precision(pred, target)
        tensor(1.)

        >>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
        ...                      [0.05, 0.75, 0.05, 0.05, 0.05],
        ...                      [0.05, 0.05, 0.75, 0.05, 0.05],
        ...                      [0.05, 0.05, 0.05, 0.75, 0.05]])
        >>> target = torch.tensor([0, 1, 3, 2])
        >>> average_precision = AveragePrecision(task="multiclass", num_classes=5, average=None)
        >>> average_precision(pred, target)
        tensor([1.0000, 1.0000, 0.2500, 0.2500,    nan])
    """

    def __new__(
        cls,
        task: Literal["binary", "multiclass", "multilabel"],
        thresholds: Optional[Union[int, List[float], Tensor]] = None,
        num_classes: Optional[int] = None,
        num_labels: Optional[int] = None,
        average: Optional[Literal["macro", "weighted", "none"]] = "macro",
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> Metric:
        # Arguments shared by all three task-specific metrics are forwarded
        # through ``kwargs``; task-specific arguments are passed positionally.
        common_args = dict(thresholds=thresholds, ignore_index=ignore_index, validate_args=validate_args)
        kwargs.update(common_args)
        if task == "binary":
            return BinaryAveragePrecision(**kwargs)
        elif task == "multiclass":
            assert isinstance(num_classes, int)
            return MulticlassAveragePrecision(num_classes, average, **kwargs)
        elif task == "multilabel":
            assert isinstance(num_labels, int)
            return MultilabelAveragePrecision(num_labels, average, **kwargs)
        raise ValueError(
            f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
        )
wemm/lib/python3.10/site-packages/torchmetrics/classification/calibration_error.py ADDED
@@ -0,0 +1,277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, Optional
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.functional.classification.calibration_error import (
21
+ _binary_calibration_error_arg_validation,
22
+ _binary_calibration_error_tensor_validation,
23
+ _binary_calibration_error_update,
24
+ _binary_confusion_matrix_format,
25
+ _ce_compute,
26
+ _multiclass_calibration_error_arg_validation,
27
+ _multiclass_calibration_error_tensor_validation,
28
+ _multiclass_calibration_error_update,
29
+ _multiclass_confusion_matrix_format,
30
+ )
31
+ from torchmetrics.metric import Metric
32
+ from torchmetrics.utilities.data import dim_zero_cat
33
+
34
+
35
class BinaryCalibrationError(Metric):
    r"""`Top-label Calibration Error`_ for binary tasks. Quantifies how well the predicted probabilities of a model
    match the empirical accuracy of its predictions, by binning confidences uniformly over [0,1] and comparing
    per-bin confidence against per-bin accuracy.

    Three different norms are implemented, each corresponding to variations on the calibration error metric.

    .. math::
        \text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)}

    .. math::
        \text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)}

    .. math::
        \text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)}

    Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of
    predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)`` containing probabilities or logits for
      each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
      sigmoid per element.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and
      therefore only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the
      positive class.

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``bce`` (:class:`~torch.Tensor`): A scalar tensor containing the calibration error

    Additional dimension ``...`` will be flattened into the batch dimension.

    Args:
        n_bins: Number of bins to use when computing the metric.
        norm: Norm used to compare empirical and expected probability bins.
        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.
        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Example:
        >>> from torchmetrics.classification import BinaryCalibrationError
        >>> preds = torch.tensor([0.25, 0.25, 0.55, 0.75, 0.75])
        >>> target = torch.tensor([0, 0, 1, 1, 1])
        >>> metric = BinaryCalibrationError(n_bins=2, norm='l1')
        >>> metric(preds, target)
        tensor(0.2900)
        >>> bce = BinaryCalibrationError(n_bins=2, norm='l2')
        >>> bce(preds, target)
        tensor(0.2918)
        >>> bce = BinaryCalibrationError(n_bins=2, norm='max')
        >>> bce(preds, target)
        tensor(0.3167)
    """
    is_differentiable: bool = False
    higher_is_better: bool = False
    full_state_update: bool = False

    def __init__(
        self,
        n_bins: int = 15,
        norm: Literal["l1", "l2", "max"] = "l1",
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        if validate_args:
            _binary_calibration_error_arg_validation(n_bins, norm, ignore_index)
        self.n_bins = n_bins
        self.norm = norm
        self.ignore_index = ignore_index
        self.validate_args = validate_args
        # Per-sample confidences and correctness flags, concatenated across processes.
        self.add_state("confidences", [], dist_reduce_fx="cat")
        self.add_state("accuracies", [], dist_reduce_fx="cat")

    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
        """Accumulate confidences and accuracies for the given batch."""
        if self.validate_args:
            _binary_calibration_error_tensor_validation(preds, target, self.ignore_index)
        # threshold=0.0 keeps the raw probabilities (no binarization here);
        # confidences are needed for the calibration bins.
        preds, target = _binary_confusion_matrix_format(
            preds, target, threshold=0.0, ignore_index=self.ignore_index, convert_to_labels=False
        )
        batch_confidences, batch_accuracies = _binary_calibration_error_update(preds, target)
        self.confidences.append(batch_confidences)
        self.accuracies.append(batch_accuracies)

    def compute(self) -> Tensor:
        """Compute the calibration error over all accumulated batches."""
        return _ce_compute(
            dim_zero_cat(self.confidences),
            dim_zero_cat(self.accuracies),
            self.n_bins,
            norm=self.norm,
        )
129
+
130
+
131
class MulticlassCalibrationError(Metric):
    r"""`Top-label Calibration Error`_ for multiclass tasks. Quantifies how well the predicted probabilities of a
    model match the empirical accuracy of its top-1 predictions, by binning confidences uniformly over [0,1] and
    comparing per-bin confidence against per-bin accuracy.

    Three different norms are implemented, each corresponding to variations on the calibration error metric.

    .. math::
        \text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)}

    .. math::
        \text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)}

    .. math::
        \text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)}

    Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of
    predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)`` containing probabilities or logits
      for each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto
      apply softmax per sample.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and
      therefore only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).

    .. note::
        Additional dimension ``...`` will be flattened into the batch dimension.

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mcce`` (:class:`~torch.Tensor`): A scalar tensor containing the calibration error

    Args:
        num_classes: Integer specifing the number of classes
        n_bins: Number of bins to use when computing the metric.
        norm: Norm used to compare empirical and expected probability bins.
        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.
        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Example:
        >>> from torchmetrics.classification import MulticlassCalibrationError
        >>> preds = torch.tensor([[0.25, 0.20, 0.55],
        ...                       [0.55, 0.05, 0.40],
        ...                       [0.10, 0.30, 0.60],
        ...                       [0.90, 0.05, 0.05]])
        >>> target = torch.tensor([0, 1, 2, 0])
        >>> metric = MulticlassCalibrationError(num_classes=3, n_bins=3, norm='l1')
        >>> metric(preds, target)
        tensor(0.2000)
        >>> mcce = MulticlassCalibrationError(num_classes=3, n_bins=3, norm='l2')
        >>> mcce(preds, target)
        tensor(0.2082)
        >>> mcce = MulticlassCalibrationError(num_classes=3, n_bins=3, norm='max')
        >>> mcce(preds, target)
        tensor(0.2333)
    """
    is_differentiable: bool = False
    higher_is_better: bool = False
    full_state_update: bool = False

    def __init__(
        self,
        num_classes: int,
        n_bins: int = 15,
        norm: Literal["l1", "l2", "max"] = "l1",
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        if validate_args:
            _multiclass_calibration_error_arg_validation(num_classes, n_bins, norm, ignore_index)
        self.num_classes = num_classes
        self.n_bins = n_bins
        self.norm = norm
        self.ignore_index = ignore_index
        self.validate_args = validate_args
        # Per-sample top-1 confidences and correctness flags, concatenated across processes.
        self.add_state("confidences", [], dist_reduce_fx="cat")
        self.add_state("accuracies", [], dist_reduce_fx="cat")

    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
        """Accumulate confidences and accuracies for the given batch."""
        if self.validate_args:
            _multiclass_calibration_error_tensor_validation(preds, target, self.num_classes, self.ignore_index)
        # Keep probabilities (no argmax) — confidences are needed for the bins.
        preds, target = _multiclass_confusion_matrix_format(
            preds, target, ignore_index=self.ignore_index, convert_to_labels=False
        )
        batch_confidences, batch_accuracies = _multiclass_calibration_error_update(preds, target)
        self.confidences.append(batch_confidences)
        self.accuracies.append(batch_accuracies)

    def compute(self) -> Tensor:
        """Compute the calibration error over all accumulated batches."""
        return _ce_compute(
            dim_zero_cat(self.confidences),
            dim_zero_cat(self.accuracies),
            self.n_bins,
            norm=self.norm,
        )
231
+
232
+
233
class CalibrationError:
    r"""`Top-label Calibration Error`_. The expected calibration error can be used to quantify how well a given
    model is calibrated e.g. how well the predicted output probabilities of the model matches the actual
    probabilities of the ground truth distribution.

    Three different norms are implemented, each corresponding to variations on the calibration error metric.

    .. math::
        \text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)}

    .. math::
        \text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)}

    .. math::
        \text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)}

    Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of
    predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`. Bins are constructed
    in an uniform way in the [0,1] range.

    This class is a simple factory: setting the ``task`` argument to either ``'binary'`` or ``'multiclass'``
    returns an instance of :mod:`BinaryCalibrationError` or :mod:`MulticlassCalibrationError` respectively.
    See those classes for the specific details of each argument influence and examples.
    """

    def __new__(
        cls,
        # NOTE: the default is ``None`` so the annotation must be Optional; a
        # missing/unknown task falls through to the ValueError below.
        task: Optional[Literal["binary", "multiclass"]] = None,
        n_bins: int = 15,
        norm: Literal["l1", "l2", "max"] = "l1",
        num_classes: Optional[int] = None,
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> Metric:
        # Forward the arguments shared by both task-specific metrics.
        kwargs.update(dict(n_bins=n_bins, norm=norm, ignore_index=ignore_index, validate_args=validate_args))
        if task == "binary":
            return BinaryCalibrationError(**kwargs)
        if task == "multiclass":
            assert isinstance(num_classes, int)
            return MulticlassCalibrationError(num_classes, **kwargs)
        # Fix: only 'binary' and 'multiclass' are supported tasks; the previous
        # message incorrectly advertised a non-existent 'multilabel' option.
        raise ValueError(f"Expected argument `task` to either be `'binary'` or `'multiclass'` but got {task}")
wemm/lib/python3.10/site-packages/torchmetrics/classification/cohen_kappa.py ADDED
@@ -0,0 +1,232 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, Optional
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.classification import BinaryConfusionMatrix, MulticlassConfusionMatrix
21
+ from torchmetrics.functional.classification.cohen_kappa import (
22
+ _binary_cohen_kappa_arg_validation,
23
+ _cohen_kappa_reduce,
24
+ _multiclass_cohen_kappa_arg_validation,
25
+ )
26
+ from torchmetrics.metric import Metric
27
+
28
+
29
class BinaryCohenKappa(BinaryConfusionMatrix):
    r"""Calculates `Cohen's kappa score`_ that measures inter-annotator agreement for binary tasks. It is defined
    as.

    .. math::
        \kappa = (p_o - p_e) / (1 - p_e)

    where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
    the expected agreement when both annotators assign labels randomly. Note that
    :math:`p_e` is estimated using a per-annotator empirical prior over the
    class labels.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): A int or float tensor of shape ``(N, ...)``. If preds is a floating point
      tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per
      element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.

    .. note::
        Additional dimension ``...`` will be flattened into the batch dimension.

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``bck`` (:class:`~torch.Tensor`): A tensor containing cohen kappa score

    Args:
        threshold: Threshold for transforming probability to binary (0,1) predictions
        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        weights: Weighting type to calculate the score. Choose from:

            - ``None`` or ``'none'``: no weighting
            - ``'linear'``: linear weighting
            - ``'quadratic'``: quadratic weighting

        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.
        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Example (preds is int tensor):
        >>> from torchmetrics.classification import BinaryCohenKappa
        >>> target = torch.tensor([1, 1, 0, 0])
        >>> preds = torch.tensor([0, 1, 0, 0])
        >>> metric = BinaryCohenKappa()
        >>> metric(preds, target)
        tensor(0.5000)

    Example (preds is float tensor):
        >>> from torchmetrics.classification import BinaryCohenKappa
        >>> target = torch.tensor([1, 1, 0, 0])
        >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01])
        >>> metric = BinaryCohenKappa()
        >>> metric(preds, target)
        tensor(0.5000)
    """
    is_differentiable: bool = False
    higher_is_better: bool = True
    full_state_update: bool = False

    def __init__(
        self,
        threshold: float = 0.5,
        ignore_index: Optional[int] = None,
        weights: Optional[Literal["linear", "quadratic", "none"]] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> None:
        # Let the confusion-matrix base class accumulate state; validation is
        # done here instead so that ``weights`` is checked as well.
        super().__init__(threshold, ignore_index, normalize=None, validate_args=False, **kwargs)
        if validate_args:
            _binary_cohen_kappa_arg_validation(threshold, ignore_index, weights)
        self.validate_args = validate_args
        self.weights = weights

    def compute(self) -> Tensor:
        """Reduce the accumulated confusion matrix to the kappa score."""
        return _cohen_kappa_reduce(self.confmat, self.weights)
105
+
106
+
107
+ class MulticlassCohenKappa(MulticlassConfusionMatrix):
108
+ r"""Calculates `Cohen's kappa score`_ that measures inter-annotator agreement for multiclass tasks. It is
109
+ defined as.
110
+
111
+ .. math::
112
+ \kappa = (p_o - p_e) / (1 - p_e)
113
+
114
+ where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
115
+ the expected agreement when both annotators assign labels randomly. Note that
116
+ :math:`p_e` is estimated using a per-annotator empirical prior over the
117
+ class labels.
118
+
119
+ As input to ``forward`` and ``update`` the metric accepts the following input:
120
+
121
+ - ``preds`` (:class:`~torch.Tensor`): Either an int tensor of shape ``(N, ...)` or float tensor of shape
122
+ ``(N, C, ..)``. If preds is a floating point we apply ``torch.argmax`` along the ``C`` dimension to automatically
123
+ convert probabilities/logits into an int tensor.
124
+ - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
125
+
126
+ .. note::
127
+ Additional dimension ``...`` will be flattened into the batch dimension.
128
+
129
+ As output to ``forward`` and ``compute`` the metric returns the following output:
130
+
131
+ - ``mcck`` (:class:`~torch.Tensor`): A tensor containing cohen kappa score
132
+
133
+ Args:
134
+ num_classes: Integer specifing the number of classes
135
+ ignore_index:
136
+ Specifies a target value that is ignored and does not contribute to the metric calculation
137
+ weights: Weighting type to calculate the score. Choose from:
138
+
139
+ - ``None`` or ``'none'``: no weighting
140
+ - ``'linear'``: linear weighting
141
+ - ``'quadratic'``: quadratic weighting
142
+
143
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
144
+ Set to ``False`` for faster computations.
145
+ kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
146
+
147
+ Example (pred is integer tensor):
148
+ >>> from torchmetrics.classification import MulticlassCohenKappa
149
+ >>> target = torch.tensor([2, 1, 0, 0])
150
+ >>> preds = torch.tensor([2, 1, 0, 1])
151
+ >>> metric = MulticlassCohenKappa(num_classes=3)
152
+ >>> metric(preds, target)
153
+ tensor(0.6364)
154
+
155
+ Example (pred is float tensor):
156
+ >>> from torchmetrics.classification import MulticlassCohenKappa
157
+ >>> target = torch.tensor([2, 1, 0, 0])
158
+ >>> preds = torch.tensor([
159
+ ... [0.16, 0.26, 0.58],
160
+ ... [0.22, 0.61, 0.17],
161
+ ... [0.71, 0.09, 0.20],
162
+ ... [0.05, 0.82, 0.13],
163
+ ... ])
164
+ >>> metric = MulticlassCohenKappa(num_classes=3)
165
+ >>> metric(preds, target)
166
+ tensor(0.6364)
167
+ """
168
+ is_differentiable: bool = False
169
+ higher_is_better: bool = True
170
+ full_state_update: bool = False
171
+
172
+ def __init__(
173
+ self,
174
+ num_classes: int,
175
+ ignore_index: Optional[int] = None,
176
+ weights: Optional[Literal["linear", "quadratic", "none"]] = None,
177
+ validate_args: bool = True,
178
+ **kwargs: Any,
179
+ ) -> None:
180
+ super().__init__(num_classes, ignore_index, normalize=None, validate_args=False, **kwargs)
181
+ if validate_args:
182
+ _multiclass_cohen_kappa_arg_validation(num_classes, ignore_index, weights)
183
+ self.weights = weights
184
+ self.validate_args = validate_args
185
+
186
+ def compute(self) -> Tensor:
187
+ return _cohen_kappa_reduce(self.confmat, self.weights)
188
+
189
+
190
+ class CohenKappa:
191
+ r"""Calculates `Cohen's kappa score`_ that measures inter-annotator agreement. It is defined as.
192
+
193
+ .. math::
194
+ \kappa = (p_o - p_e) / (1 - p_e)
195
+
196
+ where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
197
+ the expected agreement when both annotators assign labels randomly. Note that
198
+ :math:`p_e` is estimated using a per-annotator empirical prior over the
199
+ class labels.
200
+
201
+ This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
202
+ ``task`` argument to either ``'binary'`` or ``'multiclass'``. See the documentation of
203
+ :mod:`BinaryCohenKappa` and :mod:`MulticlassCohenKappa` for the specific details of
204
+ each argument influence and examples.
205
+
206
+ Legacy Example:
207
+ >>> target = torch.tensor([1, 1, 0, 0])
208
+ >>> preds = torch.tensor([0, 1, 0, 0])
209
+ >>> cohenkappa = CohenKappa(task="multiclass", num_classes=2)
210
+ >>> cohenkappa(preds, target)
211
+ tensor(0.5000)
212
+ """
213
+
214
+ def __new__(
215
+ cls,
216
+ task: Literal["binary", "multiclass"],
217
+ threshold: float = 0.5,
218
+ num_classes: Optional[int] = None,
219
+ weights: Optional[Literal["linear", "quadratic", "none"]] = None,
220
+ ignore_index: Optional[int] = None,
221
+ validate_args: bool = True,
222
+ **kwargs: Any,
223
+ ) -> Metric:
224
+ kwargs.update(dict(weights=weights, ignore_index=ignore_index, validate_args=validate_args))
225
+ if task == "binary":
226
+ return BinaryCohenKappa(threshold, **kwargs)
227
+ if task == "multiclass":
228
+ assert isinstance(num_classes, int)
229
+ return MulticlassCohenKappa(num_classes, **kwargs)
230
+ raise ValueError(
231
+ f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
232
+ )
wemm/lib/python3.10/site-packages/torchmetrics/classification/confusion_matrix.py ADDED
@@ -0,0 +1,375 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, Optional
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.functional.classification.confusion_matrix import (
21
+ _binary_confusion_matrix_arg_validation,
22
+ _binary_confusion_matrix_compute,
23
+ _binary_confusion_matrix_format,
24
+ _binary_confusion_matrix_tensor_validation,
25
+ _binary_confusion_matrix_update,
26
+ _multiclass_confusion_matrix_arg_validation,
27
+ _multiclass_confusion_matrix_compute,
28
+ _multiclass_confusion_matrix_format,
29
+ _multiclass_confusion_matrix_tensor_validation,
30
+ _multiclass_confusion_matrix_update,
31
+ _multilabel_confusion_matrix_arg_validation,
32
+ _multilabel_confusion_matrix_compute,
33
+ _multilabel_confusion_matrix_format,
34
+ _multilabel_confusion_matrix_tensor_validation,
35
+ _multilabel_confusion_matrix_update,
36
+ )
37
+ from torchmetrics.metric import Metric
38
+
39
+
40
+ class BinaryConfusionMatrix(Metric):
41
+ r"""Computes the `confusion matrix`_ for binary tasks.
42
+
43
+ As input to ``forward`` and ``update`` the metric accepts the following input:
44
+
45
+ - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point
46
+ tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per
47
+ element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``.
48
+ - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
49
+
50
+ .. note::
51
+ Additional dimension ``...`` will be flattened into the batch dimension.
52
+
53
+ As output to ``forward`` and ``compute`` the metric returns the following output:
54
+
55
+ - ``bcm`` (:class:`~torch.Tensor`): A tensor containing a ``(2, 2)`` matrix
56
+
57
+ Args:
58
+ threshold: Threshold for transforming probability to binary (0,1) predictions
59
+ ignore_index:
60
+ Specifies a target value that is ignored and does not contribute to the metric calculation
61
+ normalize: Normalization mode for confusion matrix. Choose from:
62
+
63
+ - ``None`` or ``'none'``: no normalization (default)
64
+ - ``'true'``: normalization over the targets (most commonly used)
65
+ - ``'pred'``: normalization over the predictions
66
+ - ``'all'``: normalization over the whole matrix
67
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
68
+ Set to ``False`` for faster computations.
69
+ kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
70
+
71
+ Example (preds is int tensor):
72
+ >>> from torchmetrics.classification import BinaryConfusionMatrix
73
+ >>> target = torch.tensor([1, 1, 0, 0])
74
+ >>> preds = torch.tensor([0, 1, 0, 0])
75
+ >>> bcm = BinaryConfusionMatrix()
76
+ >>> bcm(preds, target)
77
+ tensor([[2, 0],
78
+ [1, 1]])
79
+
80
+ Example (preds is float tensor):
81
+ >>> from torchmetrics.classification import BinaryConfusionMatrix
82
+ >>> target = torch.tensor([1, 1, 0, 0])
83
+ >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01])
84
+ >>> bcm = BinaryConfusionMatrix()
85
+ >>> bcm(preds, target)
86
+ tensor([[2, 0],
87
+ [1, 1]])
88
+ """
89
+ is_differentiable: bool = False
90
+ higher_is_better: Optional[bool] = None
91
+ full_state_update: bool = False
92
+
93
+ def __init__(
94
+ self,
95
+ threshold: float = 0.5,
96
+ ignore_index: Optional[int] = None,
97
+ normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
98
+ validate_args: bool = True,
99
+ **kwargs: Any,
100
+ ) -> None:
101
+ super().__init__(**kwargs)
102
+ if validate_args:
103
+ _binary_confusion_matrix_arg_validation(threshold, ignore_index, normalize)
104
+ self.threshold = threshold
105
+ self.ignore_index = ignore_index
106
+ self.normalize = normalize
107
+ self.validate_args = validate_args
108
+
109
+ self.add_state("confmat", torch.zeros(2, 2, dtype=torch.long), dist_reduce_fx="sum")
110
+
111
+ def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
112
+ """Update state with predictions and targets."""
113
+ if self.validate_args:
114
+ _binary_confusion_matrix_tensor_validation(preds, target, self.ignore_index)
115
+ preds, target = _binary_confusion_matrix_format(preds, target, self.threshold, self.ignore_index)
116
+ confmat = _binary_confusion_matrix_update(preds, target)
117
+ self.confmat += confmat
118
+
119
+ def compute(self) -> Tensor:
120
+ """Computes confusion matrix."""
121
+ return _binary_confusion_matrix_compute(self.confmat, self.normalize)
122
+
123
+
124
+ class MulticlassConfusionMatrix(Metric):
125
+ r"""Computes the `confusion matrix`_ for multiclass tasks.
126
+
127
+ As input to ``forward`` and ``update`` the metric accepts the following input:
128
+
129
+ - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point
130
+ tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per
131
+ element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``.
132
+ - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.
133
+
134
+ .. note::
135
+ Additional dimension ``...`` will be flattened into the batch dimension.
136
+
137
+ As output to ``forward`` and ``compute`` the metric returns the following output:
138
+
139
+ - ``bcm`` (:class:`~torch.Tensor`): A tensor containing a ``(2, 2)`` matrix
140
+
141
+ ---
142
+
143
+ As input to 'update' the metric accepts the following input:
144
+
145
+ - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point
146
+ we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
147
+ an int tensor.
148
+ - ``target`` (int tensor): ``(N, ...)``
149
+
150
+ Additional dimension ``...`` will be flattened into the batch dimension.
151
+
152
+ As output of 'compute' the metric returns the following output:
153
+
154
+ - ``confusion matrix``: [num_classes, num_classes] matrix
155
+
156
+ Args:
157
+ num_classes: Integer specifing the number of classes
158
+ ignore_index:
159
+ Specifies a target value that is ignored and does not contribute to the metric calculation
160
+ normalize: Normalization mode for confusion matrix. Choose from:
161
+
162
+ - ``None`` or ``'none'``: no normalization (default)
163
+ - ``'true'``: normalization over the targets (most commonly used)
164
+ - ``'pred'``: normalization over the predictions
165
+ - ``'all'``: normalization over the whole matrix
166
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
167
+ Set to ``False`` for faster computations.
168
+ kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
169
+
170
+ Example (pred is integer tensor):
171
+ >>> from torchmetrics.classification import MulticlassConfusionMatrix
172
+ >>> target = torch.tensor([2, 1, 0, 0])
173
+ >>> preds = torch.tensor([2, 1, 0, 1])
174
+ >>> metric = MulticlassConfusionMatrix(num_classes=3)
175
+ >>> metric(preds, target)
176
+ tensor([[1, 1, 0],
177
+ [0, 1, 0],
178
+ [0, 0, 1]])
179
+
180
+ Example (pred is float tensor):
181
+ >>> from torchmetrics.classification import MulticlassConfusionMatrix
182
+ >>> target = torch.tensor([2, 1, 0, 0])
183
+ >>> preds = torch.tensor([
184
+ ... [0.16, 0.26, 0.58],
185
+ ... [0.22, 0.61, 0.17],
186
+ ... [0.71, 0.09, 0.20],
187
+ ... [0.05, 0.82, 0.13],
188
+ ... ])
189
+ >>> metric = MulticlassConfusionMatrix(num_classes=3)
190
+ >>> metric(preds, target)
191
+ tensor([[1, 1, 0],
192
+ [0, 1, 0],
193
+ [0, 0, 1]])
194
+ """
195
+ is_differentiable: bool = False
196
+ higher_is_better: Optional[bool] = None
197
+ full_state_update: bool = False
198
+
199
+ def __init__(
200
+ self,
201
+ num_classes: int,
202
+ ignore_index: Optional[int] = None,
203
+ normalize: Optional[Literal["none", "true", "pred", "all"]] = None,
204
+ validate_args: bool = True,
205
+ **kwargs: Any,
206
+ ) -> None:
207
+ super().__init__(**kwargs)
208
+ if validate_args:
209
+ _multiclass_confusion_matrix_arg_validation(num_classes, ignore_index, normalize)
210
+ self.num_classes = num_classes
211
+ self.ignore_index = ignore_index
212
+ self.normalize = normalize
213
+ self.validate_args = validate_args
214
+
215
+ self.add_state("confmat", torch.zeros(num_classes, num_classes, dtype=torch.long), dist_reduce_fx="sum")
216
+
217
+ def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
218
+ """Update state with predictions and targets."""
219
+ if self.validate_args:
220
+ _multiclass_confusion_matrix_tensor_validation(preds, target, self.num_classes, self.ignore_index)
221
+ preds, target = _multiclass_confusion_matrix_format(preds, target, self.ignore_index)
222
+ confmat = _multiclass_confusion_matrix_update(preds, target, self.num_classes)
223
+ self.confmat += confmat
224
+
225
+ def compute(self) -> Tensor:
226
+ """Computes confusion matrix."""
227
+ return _multiclass_confusion_matrix_compute(self.confmat, self.normalize)
228
+
229
+
230
+ class MultilabelConfusionMatrix(Metric):
231
+ r"""Computes the `confusion matrix`_ for multilabel tasks.
232
+
233
+ As input to 'update' the metric accepts the following input:
234
+
235
+ - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
236
+ [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally,
237
+ we convert to int tensor with thresholding using the value in ``threshold``.
238
+ - ``target`` (int tensor): ``(N, C, ...)``
239
+
240
+ Additional dimension ``...`` will be flattened into the batch dimension.
241
+
242
+ As output of 'compute' the metric returns the following output:
243
+
244
+ - ``confusion matrix``: [num_labels,2,2] matrix
245
+
246
+ Args:
247
+ num_classes: Integer specifing the number of labels
248
+ threshold: Threshold for transforming probability to binary (0,1) predictions
249
+ ignore_index:
250
+ Specifies a target value that is ignored and does not contribute to the metric calculation
251
+ normalize: Normalization mode for confusion matrix. Choose from:
252
+
253
+ - ``None`` or ``'none'``: no normalization (default)
254
+ - ``'true'``: normalization over the targets (most commonly used)
255
+ - ``'pred'``: normalization over the predictions
256
+ - ``'all'``: normalization over the whole matrix
257
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
258
+ Set to ``False`` for faster computations.
259
+ kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
260
+
261
+ Example (preds is int tensor):
262
+ >>> from torchmetrics.classification import MultilabelConfusionMatrix
263
+ >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
264
+ >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
265
+ >>> metric = MultilabelConfusionMatrix(num_labels=3)
266
+ >>> metric(preds, target)
267
+ tensor([[[1, 0], [0, 1]],
268
+ [[1, 0], [1, 0]],
269
+ [[0, 1], [0, 1]]])
270
+
271
+ Example (preds is float tensor):
272
+ >>> from torchmetrics.classification import MultilabelConfusionMatrix
273
+ >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
274
+ >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
275
+ >>> metric = MultilabelConfusionMatrix(num_labels=3)
276
+ >>> metric(preds, target)
277
+ tensor([[[1, 0], [0, 1]],
278
+ [[1, 0], [1, 0]],
279
+ [[0, 1], [0, 1]]])
280
+ """
281
+ is_differentiable: bool = False
282
+ higher_is_better: Optional[bool] = None
283
+ full_state_update: bool = False
284
+
285
+ def __init__(
286
+ self,
287
+ num_labels: int,
288
+ threshold: float = 0.5,
289
+ ignore_index: Optional[int] = None,
290
+ normalize: Optional[Literal["none", "true", "pred", "all"]] = None,
291
+ validate_args: bool = True,
292
+ **kwargs: Any,
293
+ ) -> None:
294
+ super().__init__(**kwargs)
295
+ if validate_args:
296
+ _multilabel_confusion_matrix_arg_validation(num_labels, threshold, ignore_index, normalize)
297
+ self.num_labels = num_labels
298
+ self.threshold = threshold
299
+ self.ignore_index = ignore_index
300
+ self.normalize = normalize
301
+ self.validate_args = validate_args
302
+
303
+ self.add_state("confmat", torch.zeros(num_labels, 2, 2, dtype=torch.long), dist_reduce_fx="sum")
304
+
305
+ def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
306
+ """Update state with predictions and targets."""
307
+ if self.validate_args:
308
+ _multilabel_confusion_matrix_tensor_validation(preds, target, self.num_labels, self.ignore_index)
309
+ preds, target = _multilabel_confusion_matrix_format(
310
+ preds, target, self.num_labels, self.threshold, self.ignore_index
311
+ )
312
+ confmat = _multilabel_confusion_matrix_update(preds, target, self.num_labels)
313
+ self.confmat += confmat
314
+
315
+ def compute(self) -> Tensor:
316
+ """Computes confusion matrix."""
317
+ return _multilabel_confusion_matrix_compute(self.confmat, self.normalize)
318
+
319
+
320
+ class ConfusionMatrix:
321
+ r"""Computes the `confusion matrix`_.
322
+
323
+ This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
324
+ ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
325
+ :mod:`BinaryConfusionMatrix`, :mod:`MulticlassConfusionMatrix` and :func:`MultilabelConfusionMatrix` for
326
+ the specific details of each argument influence and examples.
327
+
328
+ Legacy Example:
329
+ >>> target = torch.tensor([1, 1, 0, 0])
330
+ >>> preds = torch.tensor([0, 1, 0, 0])
331
+ >>> confmat = ConfusionMatrix(task="binary", num_classes=2)
332
+ >>> confmat(preds, target)
333
+ tensor([[2, 0],
334
+ [1, 1]])
335
+
336
+ >>> target = torch.tensor([2, 1, 0, 0])
337
+ >>> preds = torch.tensor([2, 1, 0, 1])
338
+ >>> confmat = ConfusionMatrix(task="multiclass", num_classes=3)
339
+ >>> confmat(preds, target)
340
+ tensor([[1, 1, 0],
341
+ [0, 1, 0],
342
+ [0, 0, 1]])
343
+
344
+ >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
345
+ >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
346
+ >>> confmat = ConfusionMatrix(task="multilabel", num_labels=3)
347
+ >>> confmat(preds, target)
348
+ tensor([[[1, 0], [0, 1]],
349
+ [[1, 0], [1, 0]],
350
+ [[0, 1], [0, 1]]])
351
+ """
352
+
353
+ def __new__(
354
+ cls,
355
+ task: Literal["binary", "multiclass", "multilabel"],
356
+ threshold: float = 0.5,
357
+ num_classes: Optional[int] = None,
358
+ num_labels: Optional[int] = None,
359
+ normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
360
+ ignore_index: Optional[int] = None,
361
+ validate_args: bool = True,
362
+ **kwargs: Any,
363
+ ) -> Metric:
364
+ kwargs.update(dict(normalize=normalize, ignore_index=ignore_index, validate_args=validate_args))
365
+ if task == "binary":
366
+ return BinaryConfusionMatrix(threshold, **kwargs)
367
+ if task == "multiclass":
368
+ assert isinstance(num_classes, int)
369
+ return MulticlassConfusionMatrix(num_classes, **kwargs)
370
+ if task == "multilabel":
371
+ assert isinstance(num_labels, int)
372
+ return MultilabelConfusionMatrix(num_labels, threshold, **kwargs)
373
+ raise ValueError(
374
+ f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
375
+ )
wemm/lib/python3.10/site-packages/torchmetrics/classification/dice.py ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, Callable, Optional, Tuple, no_type_check
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.functional.classification.dice import _dice_compute
21
+ from torchmetrics.functional.classification.stat_scores import _stat_scores_update
22
+ from torchmetrics.metric import Metric
23
+ from torchmetrics.utilities.enums import AverageMethod, MDMCAverageMethod
24
+
25
+
26
+ class Dice(Metric):
27
+ r"""Computes `Dice`_:
28
+
29
+ .. math:: \text{Dice} = \frac{\text{2 * TP}}{\text{2 * TP} + \text{FP} + \text{FN}}
30
+
31
+ Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
32
+ false positives respecitively.
33
+
34
+ It is recommend set `ignore_index` to index of background class.
35
+
36
+ The reduction method (how the precision scores are aggregated) is controlled by the
37
+ ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
38
+ multi-dimensional multi-class case.
39
+
40
+ As input to ``forward`` and ``update`` the metric accepts the following input:
41
+
42
+ - ``preds`` (:class:`~torch.Tensor`): Predictions from model (probabilities, logits or labels)
43
+ - ``target`` (:class:`~torch.Tensor`): Ground truth values
44
+
45
+ As output to ``forward`` and ``compute`` the metric returns the following output:
46
+
47
+ - ``dice`` (:class:`~torch.Tensor`): A tensor containing the dice score.
48
+
49
+ - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned
50
+ - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number of classes
51
+
52
+ Args:
53
+ num_classes:
54
+ Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
55
+ threshold:
56
+ Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
57
+ of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
58
+ zero_division:
59
+ The value to use for the score if denominator equals zero.
60
+ average:
61
+ Defines the reduction that is applied. Should be one of the following:
62
+
63
+ - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
64
+ - ``'macro'``: Calculate the metric for each class separately, and average the
65
+ metrics across classes (with equal weights for each class).
66
+ - ``'weighted'``: Calculate the metric for each class separately, and average the
67
+ metrics across classes, weighting each class by its support (``tp + fn``).
68
+ - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
69
+ the metric for every class.
70
+ - ``'samples'``: Calculate the metric for each sample, and average the metrics
71
+ across samples (with equal weights for each sample).
72
+
73
+ .. note::
74
+ What is considered a sample in the multi-dimensional multi-class case
75
+ depends on the value of ``mdmc_average``.
76
+
77
+ mdmc_average:
78
+ Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
79
+ ``average`` parameter). Should be one of the following:
80
+
81
+ - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
82
+ multi-class.
83
+
84
+ - ``'samplewise'``: In this case, the statistics are computed separately for each
85
+ sample on the ``N`` axis, and then averaged over samples.
86
+ The computation for each sample is done by treating the flattened extra axes ``...``
87
+ as the ``N`` dimension within the sample,
88
+ and computing the metric for the sample based on that.
89
+
90
+ - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs
91
+ are flattened into a new ``N_X`` sample axis, i.e.
92
+ the inputs are treated as if they were ``(N_X, C)``.
93
+ From here on the ``average`` parameter applies as usual.
94
+
95
+ ignore_index:
96
+ Integer specifying a target class to ignore. If given, this class index does not contribute
97
+ to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
98
+ or ``'none'``, the score for the ignored class will be returned as ``nan``.
99
+
100
+ top_k:
101
+ Number of the highest probability or logit score predictions considered finding the correct label,
102
+ relevant only for (multi-dimensional) multi-class inputs. The
103
+ default value (``None``) will be interpreted as 1 for these inputs.
104
+ Should be left at default (``None``) for all other types of inputs.
105
+
106
+ multiclass:
107
+ Used only in certain special cases, where you want to treat inputs as a different type
108
+ than what they appear to be.
109
+
110
+ kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
111
+
112
+ Raises:
113
+ ValueError:
114
+ If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``.
115
+ ValueError:
116
+ If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``.
117
+ ValueError:
118
+ If ``average`` is set but ``num_classes`` is not provided.
119
+ ValueError:
120
+ If ``num_classes`` is set and ``ignore_index`` is not in the range ``[0, num_classes)``.
121
+
122
+ Example:
123
+ >>> import torch
124
+ >>> from torchmetrics import Dice
125
+ >>> preds = torch.tensor([2, 0, 2, 1])
126
+ >>> target = torch.tensor([1, 1, 2, 0])
127
+ >>> dice = Dice(average='micro')
128
+ >>> dice(preds, target)
129
+ tensor(0.2500)
130
+ """
131
+ is_differentiable: bool = False
132
+ higher_is_better: bool = True
133
+ full_state_update: bool = False
134
+
135
+ @no_type_check
136
+ def __init__(
137
+ self,
138
+ zero_division: int = 0,
139
+ num_classes: Optional[int] = None,
140
+ threshold: float = 0.5,
141
+ average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
142
+ mdmc_average: Optional[str] = "global",
143
+ ignore_index: Optional[int] = None,
144
+ top_k: Optional[int] = None,
145
+ multiclass: Optional[bool] = None,
146
+ **kwargs: Any,
147
+ ) -> None:
148
+ super().__init__(**kwargs)
149
+ allowed_average = ("micro", "macro", "weighted", "samples", "none", None)
150
+ if average not in allowed_average:
151
+ raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
152
+
153
+ _reduce_options = (AverageMethod.WEIGHTED, AverageMethod.NONE, None)
154
+ if "reduce" not in kwargs:
155
+ kwargs["reduce"] = AverageMethod.MACRO if average in _reduce_options else average
156
+ if "mdmc_reduce" not in kwargs:
157
+ kwargs["mdmc_reduce"] = mdmc_average
158
+
159
+ self.reduce = average
160
+ self.mdmc_reduce = mdmc_average
161
+ self.num_classes = num_classes
162
+ self.threshold = threshold
163
+ self.multiclass = multiclass
164
+ self.ignore_index = ignore_index
165
+ self.top_k = top_k
166
+
167
+ if average not in ["micro", "macro", "samples"]:
168
+ raise ValueError(f"The `reduce` {average} is not valid.")
169
+
170
+ if mdmc_average not in [None, "samplewise", "global"]:
171
+ raise ValueError(f"The `mdmc_reduce` {mdmc_average} is not valid.")
172
+
173
+ if average == "macro" and (not num_classes or num_classes < 1):
174
+ raise ValueError("When you set `average` as 'macro', you have to provide the number of classes.")
175
+
176
+ if num_classes and ignore_index is not None and (not ignore_index < num_classes or num_classes == 1):
177
+ raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
178
+
179
+ default: Callable = lambda: []
180
+ reduce_fn: Optional[str] = "cat"
181
+ if mdmc_average != "samplewise" and average != "samples":
182
+ if average == "micro":
183
+ zeros_shape = []
184
+ elif average == "macro":
185
+ zeros_shape = [num_classes]
186
+ else:
187
+ raise ValueError(f'Wrong reduce="{average}"')
188
+ default = lambda: torch.zeros(zeros_shape, dtype=torch.long)
189
+ reduce_fn = "sum"
190
+
191
+ for s in ("tp", "fp", "tn", "fn"):
192
+ self.add_state(s, default=default(), dist_reduce_fx=reduce_fn)
193
+
194
+ self.average = average
195
+ self.zero_division = zero_division
196
+
197
+ @no_type_check
198
+ def update(self, preds: Tensor, target: Tensor) -> None:
199
+ """Update state with predictions and targets."""
200
+ tp, fp, tn, fn = _stat_scores_update(
201
+ preds,
202
+ target,
203
+ reduce=self.reduce,
204
+ mdmc_reduce=self.mdmc_reduce,
205
+ threshold=self.threshold,
206
+ num_classes=self.num_classes,
207
+ top_k=self.top_k,
208
+ multiclass=self.multiclass,
209
+ ignore_index=self.ignore_index,
210
+ )
211
+
212
+ # Update states
213
+ if self.reduce != AverageMethod.SAMPLES and self.mdmc_reduce != MDMCAverageMethod.SAMPLEWISE:
214
+ self.tp += tp
215
+ self.fp += fp
216
+ self.tn += tn
217
+ self.fn += fn
218
+ else:
219
+ self.tp.append(tp)
220
+ self.fp.append(fp)
221
+ self.tn.append(tn)
222
+ self.fn.append(fn)
223
+
224
+ @no_type_check
225
+ def _get_final_stats(self) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
226
+ """Performs concatenation on the stat scores if neccesary, before passing them to a compute function."""
227
+ tp = torch.cat(self.tp) if isinstance(self.tp, list) else self.tp
228
+ fp = torch.cat(self.fp) if isinstance(self.fp, list) else self.fp
229
+ tn = torch.cat(self.tn) if isinstance(self.tn, list) else self.tn
230
+ fn = torch.cat(self.fn) if isinstance(self.fn, list) else self.fn
231
+ return tp, fp, tn, fn
232
+
233
+ @no_type_check
234
+ def compute(self) -> Tensor:
235
+ """Computes metric."""
236
+ tp, fp, _, fn = self._get_final_stats()
237
+ return _dice_compute(tp, fp, fn, self.average, self.mdmc_reduce, self.zero_division)
wemm/lib/python3.10/site-packages/torchmetrics/classification/hamming.py ADDED
@@ -0,0 +1,368 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, Optional
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.classification.stat_scores import BinaryStatScores, MulticlassStatScores, MultilabelStatScores
21
+ from torchmetrics.functional.classification.hamming import _hamming_distance_reduce
22
+ from torchmetrics.metric import Metric
23
+
24
+
25
class BinaryHammingDistance(BinaryStatScores):
    r"""Computes the average `Hamming distance`_ (also known as Hamming loss) for binary tasks:

    .. math::
        \text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il})

    Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions,
    and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that
    tensor.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point
      tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per
      element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.


    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``bhd`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``multidim_average`` arguments:

      - If ``multidim_average`` is set to ``global``, the metric returns a scalar value.
      - If ``multidim_average`` is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a
        scalar value per sample.

    Args:
        threshold: Threshold for transforming probability to binary {0,1} predictions
        multidim_average:
            Defines how additionally dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flatted along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (preds is int tensor):
        >>> from torchmetrics.classification import BinaryHammingDistance
        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
        >>> preds = torch.tensor([0, 0, 1, 1, 0, 1])
        >>> metric = BinaryHammingDistance()
        >>> metric(preds, target)
        tensor(0.3333)

    Example (preds is float tensor):
        >>> from torchmetrics.classification import BinaryHammingDistance
        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
        >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
        >>> metric = BinaryHammingDistance()
        >>> metric(preds, target)
        tensor(0.3333)

    Example (multidim tensors):
        >>> from torchmetrics.classification import BinaryHammingDistance
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> metric = BinaryHammingDistance(multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.6667, 0.8333])
    """

    is_differentiable: bool = False
    # Hamming distance counts disagreements between preds and target, so lower is better.
    higher_is_better: bool = False
    full_state_update: bool = False

    def compute(self) -> Tensor:
        """Reduce the tp/fp/tn/fn counts accumulated by the parent stat-scores class."""
        tp, fp, tn, fn = self._final_state()
        return _hamming_distance_reduce(tp, fp, tn, fn, average="binary", multidim_average=self.multidim_average)
102
+
103
+
104
class MulticlassHammingDistance(MulticlassStatScores):
    r"""Computes the average `Hamming distance`_ (also known as Hamming loss) for multiclass tasks:

    .. math::
        \text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il})

    Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions,
    and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that
    tensor.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ..)``.
      If preds is a floating point we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
      probabilities/logits into an int tensor.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.


    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mchd`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and
      ``multidim_average`` arguments:

      - If ``multidim_average`` is set to ``global``:

        - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
        - If ``average=None/'none'``, the shape will be ``(C,)``

      - If ``multidim_average`` is set to ``samplewise``:

        - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
        - If ``average=None/'none'``, the shape will be ``(N, C)``

    Args:
        num_classes: Integer specifing the number of classes
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction
        top_k:
            Number of highest probability or logit score predictions considered to find the correct label.
            Only works when ``preds`` contain probabilities/logits.
        multidim_average:
            Defines how additionally dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flatted along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (preds is int tensor):
        >>> from torchmetrics.classification import MulticlassHammingDistance
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([2, 1, 0, 1])
        >>> metric = MulticlassHammingDistance(num_classes=3)
        >>> metric(preds, target)
        tensor(0.1667)
        >>> mchd = MulticlassHammingDistance(num_classes=3, average=None)
        >>> mchd(preds, target)
        tensor([0.5000, 0.0000, 0.0000])

    Example (preds is float tensor):
        >>> from torchmetrics.classification import MulticlassHammingDistance
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([
        ...     [0.16, 0.26, 0.58],
        ...     [0.22, 0.61, 0.17],
        ...     [0.71, 0.09, 0.20],
        ...     [0.05, 0.82, 0.13],
        ... ])
        >>> metric = MulticlassHammingDistance(num_classes=3)
        >>> metric(preds, target)
        tensor(0.1667)
        >>> mchd = MulticlassHammingDistance(num_classes=3, average=None)
        >>> mchd(preds, target)
        tensor([0.5000, 0.0000, 0.0000])

    Example (multidim tensors):
        >>> from torchmetrics.classification import MulticlassHammingDistance
        >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
        >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
        >>> metric = MulticlassHammingDistance(num_classes=3, multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.5000, 0.7222])
        >>> mchd = MulticlassHammingDistance(num_classes=3, multidim_average='samplewise', average=None)
        >>> mchd(preds, target)
        tensor([[0.0000, 1.0000, 0.5000],
                [1.0000, 0.6667, 0.5000]])
    """

    is_differentiable: bool = False
    # Hamming distance counts disagreements between preds and target, so lower is better.
    higher_is_better: bool = False
    full_state_update: bool = False

    def compute(self) -> Tensor:
        """Reduce the tp/fp/tn/fn counts accumulated by the parent stat-scores class."""
        tp, fp, tn, fn = self._final_state()
        return _hamming_distance_reduce(tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average)
208
+
209
+
210
class MultilabelHammingDistance(MultilabelStatScores):
    r"""Computes the average `Hamming distance`_ (also known as Hamming loss) for multilabel tasks:

    .. math::
        \text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il})

    Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions,
    and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that
    tensor.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, C, ...)``. If preds is a
      floating point tensor with values outside [0,1] range we consider the input to be logits and will auto
      apply sigmoid per element. Addtionally, we convert to int tensor with thresholding using the value in
      ``threshold``.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``.


    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mlhd`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and
      ``multidim_average`` arguments:

      - If ``multidim_average`` is set to ``global``:

        - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
        - If ``average=None/'none'``, the shape will be ``(C,)``

      - If ``multidim_average`` is set to ``samplewise``:

        - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
        - If ``average=None/'none'``, the shape will be ``(N, C)``

    Args:
        num_labels: Integer specifing the number of labels
        threshold: Threshold for transforming probability to binary (0,1) predictions
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction

        multidim_average:
            Defines how additionally dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flatted along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (preds is int tensor):
        >>> from torchmetrics.classification import MultilabelHammingDistance
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
        >>> metric = MultilabelHammingDistance(num_labels=3)
        >>> metric(preds, target)
        tensor(0.3333)
        >>> mlhd = MultilabelHammingDistance(num_labels=3, average=None)
        >>> mlhd(preds, target)
        tensor([0.0000, 0.5000, 0.5000])

    Example (preds is float tensor):
        >>> from torchmetrics.classification import MultilabelHammingDistance
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
        >>> metric = MultilabelHammingDistance(num_labels=3)
        >>> metric(preds, target)
        tensor(0.3333)
        >>> mlhd = MultilabelHammingDistance(num_labels=3, average=None)
        >>> mlhd(preds, target)
        tensor([0.0000, 0.5000, 0.5000])

    Example (multidim tensors):
        >>> from torchmetrics.classification import MultilabelHammingDistance
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> metric = MultilabelHammingDistance(num_labels=3, multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.6667, 0.8333])
        >>> mlhd = MultilabelHammingDistance(num_labels=3, multidim_average='samplewise', average=None)
        >>> mlhd(preds, target)
        tensor([[0.5000, 0.5000, 1.0000],
                [1.0000, 1.0000, 0.5000]])
    """

    is_differentiable: bool = False
    # Hamming distance counts disagreements between preds and target, so lower is better.
    higher_is_better: bool = False
    full_state_update: bool = False

    def compute(self) -> Tensor:
        """Reduce the tp/fp/tn/fn counts accumulated by the parent stat-scores class."""
        tp, fp, tn, fn = self._final_state()
        # ``multilabel=True`` distinguishes the multilabel reduction from the other task variants.
        return _hamming_distance_reduce(
            tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average, multilabel=True
        )
316
+
317
+
318
class HammingDistance:
    r"""Computes the average `Hamming distance`_ (also known as Hamming loss):

    .. math::
        \text{Hamming distance} = \frac{1}{N \cdot L} \sum_i^N \sum_l^L 1(y_{il} \neq \hat{y}_{il})

    Where :math:`y` is a tensor of target values, :math:`\hat{y}` is a tensor of predictions,
    and :math:`\bullet_{il}` refers to the :math:`l`-th label of the :math:`i`-th sample of that
    tensor.

    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
    :mod:`BinaryHammingDistance`, :mod:`MulticlassHammingDistance` and :mod:`MultilabelHammingDistance` for the
    specific details of each argument influence and examples.

    Legacy Example:
        >>> target = torch.tensor([[0, 1], [1, 1]])
        >>> preds = torch.tensor([[0, 1], [0, 1]])
        >>> hamming_distance = HammingDistance(task="multilabel", num_labels=2)
        >>> hamming_distance(preds, target)
        tensor(0.2500)
    """

    def __new__(
        cls,
        task: Literal["binary", "multiclass", "multilabel"],
        threshold: float = 0.5,
        num_classes: Optional[int] = None,
        num_labels: Optional[int] = None,
        average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
        multidim_average: Optional[Literal["global", "samplewise"]] = "global",
        top_k: Optional[int] = 1,
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> Metric:
        """Dispatch to the task-specific metric class based on ``task``.

        Returns:
            An instance of :class:`BinaryHammingDistance`, :class:`MulticlassHammingDistance` or
            :class:`MultilabelHammingDistance` depending on ``task``.

        Raises:
            ValueError: If ``task`` is not one of the supported values, or if an argument required
                by the selected task (``num_classes``, ``num_labels``, ``top_k``) has the wrong type.
        """
        # NOTE: validation previously used bare ``assert`` statements, which are stripped
        # under ``python -O`` and raise uninformative ``AssertionError``s; explicit
        # ``ValueError``s keep the checks active in all modes and match the final branch below.
        if multidim_average is None:
            raise ValueError("Expected argument `multidim_average` to be one of ('global', 'samplewise'), got None")
        kwargs.update(dict(multidim_average=multidim_average, ignore_index=ignore_index, validate_args=validate_args))
        if task == "binary":
            return BinaryHammingDistance(threshold, **kwargs)
        if task == "multiclass":
            if not isinstance(num_classes, int):
                raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)}` was passed.")
            if not isinstance(top_k, int):
                raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)}` was passed.")
            return MulticlassHammingDistance(num_classes, top_k, average, **kwargs)
        if task == "multilabel":
            if not isinstance(num_labels, int):
                raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)}` was passed.")
            return MultilabelHammingDistance(num_labels, threshold, average, **kwargs)
        raise ValueError(
            f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
        )
wemm/lib/python3.10/site-packages/torchmetrics/classification/precision_recall.py ADDED
@@ -0,0 +1,701 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, Optional
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.classification.stat_scores import BinaryStatScores, MulticlassStatScores, MultilabelStatScores
21
+ from torchmetrics.functional.classification.precision_recall import _precision_recall_reduce
22
+ from torchmetrics.metric import Metric
23
+
24
+
25
class BinaryPrecision(BinaryStatScores):
    r"""Computes `Precision`_ for binary tasks:

    .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}

    Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
    false positives respecitively.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): A int or float tensor of shape ``(N, ...)``. If preds is a floating point
      tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid per
      element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.


    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``bp`` (:class:`~torch.Tensor`): If ``multidim_average`` is set to ``global``, the metric returns a scalar
      value. If ``multidim_average`` is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a
      scalar value per sample.

    Args:
        threshold: Threshold for transforming probability to binary {0,1} predictions
        multidim_average:
            Defines how additionally dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flatted along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (preds is int tensor):
        >>> from torchmetrics.classification import BinaryPrecision
        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
        >>> preds = torch.tensor([0, 0, 1, 1, 0, 1])
        >>> metric = BinaryPrecision()
        >>> metric(preds, target)
        tensor(0.6667)

    Example (preds is float tensor):
        >>> from torchmetrics.classification import BinaryPrecision
        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
        >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
        >>> metric = BinaryPrecision()
        >>> metric(preds, target)
        tensor(0.6667)

    Example (multidim tensors):
        >>> from torchmetrics.classification import BinaryPrecision
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> metric = BinaryPrecision(multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.4000, 0.0000])
    """
    is_differentiable: bool = False
    # Precision is a score in [0, 1]; higher values indicate fewer false positives.
    higher_is_better: Optional[bool] = True
    full_state_update: bool = False

    def compute(self) -> Tensor:
        """Reduce the tp/fp/tn/fn counts accumulated by the parent stat-scores class into precision."""
        tp, fp, tn, fn = self._final_state()
        return _precision_recall_reduce(
            "precision", tp, fp, tn, fn, average="binary", multidim_average=self.multidim_average
        )
99
+
100
+
101
class MulticlassPrecision(MulticlassStatScores):
    r"""Computes `Precision`_ for multiclass tasks.

    .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}

    Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
    false positives respecitively.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ..)``.
      If preds is a floating point we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
      probabilities/logits into an int tensor.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.


    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mcp`` (:class:`~torch.Tensor`): The returned shape depends on the ``average`` and ``multidim_average``
      arguments:

      - If ``multidim_average`` is set to ``global``:

        - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
        - If ``average=None/'none'``, the shape will be ``(C,)``

      - If ``multidim_average`` is set to ``samplewise``:

        - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
        - If ``average=None/'none'``, the shape will be ``(N, C)``

    Args:
        num_classes: Integer specifing the number of classes
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction
        top_k:
            Number of highest probability or logit score predictions considered to find the correct label.
            Only works when ``preds`` contain probabilities/logits.
        multidim_average:
            Defines how additionally dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flatted along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (preds is int tensor):
        >>> from torchmetrics.classification import MulticlassPrecision
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([2, 1, 0, 1])
        >>> metric = MulticlassPrecision(num_classes=3)
        >>> metric(preds, target)
        tensor(0.8333)
        >>> mcp = MulticlassPrecision(num_classes=3, average=None)
        >>> mcp(preds, target)
        tensor([1.0000, 0.5000, 1.0000])

    Example (preds is float tensor):
        >>> from torchmetrics.classification import MulticlassPrecision
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([
        ...     [0.16, 0.26, 0.58],
        ...     [0.22, 0.61, 0.17],
        ...     [0.71, 0.09, 0.20],
        ...     [0.05, 0.82, 0.13],
        ... ])
        >>> metric = MulticlassPrecision(num_classes=3)
        >>> metric(preds, target)
        tensor(0.8333)
        >>> mcp = MulticlassPrecision(num_classes=3, average=None)
        >>> mcp(preds, target)
        tensor([1.0000, 0.5000, 1.0000])

    Example (multidim tensors):
        >>> from torchmetrics.classification import MulticlassPrecision
        >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
        >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
        >>> metric = MulticlassPrecision(num_classes=3, multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.3889, 0.2778])
        >>> mcp = MulticlassPrecision(num_classes=3, multidim_average='samplewise', average=None)
        >>> mcp(preds, target)
        tensor([[0.6667, 0.0000, 0.5000],
                [0.0000, 0.5000, 0.3333]])
    """
    is_differentiable: bool = False
    # Precision is a score in [0, 1]; higher values indicate fewer false positives.
    higher_is_better: Optional[bool] = True
    full_state_update: bool = False

    def compute(self) -> Tensor:
        """Reduce the tp/fp/tn/fn counts accumulated by the parent stat-scores class into precision."""
        tp, fp, tn, fn = self._final_state()
        return _precision_recall_reduce(
            "precision", tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average
        )
204
+
205
+
206
class MultilabelPrecision(MultilabelStatScores):
    r"""Computes `Precision`_ for multilabel tasks.

    .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}

    Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
    false positives respecitively.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, C, ...)``.
      If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and
      will auto apply sigmoid per element. Addtionally, we convert to int tensor with thresholding using the value
      in ``threshold``.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``.


    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mlp`` (:class:`~torch.Tensor`): The returned shape depends on the ``average`` and ``multidim_average``
      arguments:

      - If ``multidim_average`` is set to ``global``:

        - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
        - If ``average=None/'none'``, the shape will be ``(C,)``

      - If ``multidim_average`` is set to ``samplewise``:

        - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
        - If ``average=None/'none'``, the shape will be ``(N, C)``

    Args:
        num_labels: Integer specifing the number of labels
        threshold: Threshold for transforming probability to binary (0,1) predictions
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction

        multidim_average:
            Defines how additionally dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flatted along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (preds is int tensor):
        >>> from torchmetrics.classification import MultilabelPrecision
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
        >>> metric = MultilabelPrecision(num_labels=3)
        >>> metric(preds, target)
        tensor(0.5000)
        >>> mlp = MultilabelPrecision(num_labels=3, average=None)
        >>> mlp(preds, target)
        tensor([1.0000, 0.0000, 0.5000])

    Example (preds is float tensor):
        >>> from torchmetrics.classification import MultilabelPrecision
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
        >>> metric = MultilabelPrecision(num_labels=3)
        >>> metric(preds, target)
        tensor(0.5000)
        >>> mlp = MultilabelPrecision(num_labels=3, average=None)
        >>> mlp(preds, target)
        tensor([1.0000, 0.0000, 0.5000])

    Example (multidim tensors):
        >>> from torchmetrics.classification import MultilabelPrecision
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> metric = MultilabelPrecision(num_labels=3, multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.3333, 0.0000])
        >>> mlp = MultilabelPrecision(num_labels=3, multidim_average='samplewise', average=None)
        >>> mlp(preds, target)
        tensor([[0.5000, 0.5000, 0.0000],
                [0.0000, 0.0000, 0.0000]])
    """
    is_differentiable: bool = False
    # Precision is a score in [0, 1]; higher values indicate fewer false positives.
    higher_is_better: Optional[bool] = True
    full_state_update: bool = False

    def compute(self) -> Tensor:
        """Reduce the tp/fp/tn/fn counts accumulated by the parent stat-scores class into precision."""
        tp, fp, tn, fn = self._final_state()
        return _precision_recall_reduce(
            "precision", tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average
        )
309
+
310
+
311
+ class BinaryRecall(BinaryStatScores):
312
+ r"""Computes `Recall`_ for binary tasks:
313
+
314
+ .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
315
+
316
+ Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
317
+ false negatives respecitively.
318
+
319
+ As input to ``forward`` and ``update`` the metric accepts the following input:
320
+
321
+ - ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, ...)``. If preds is a
322
+ floating point tensor with values outside [0,1] range we consider the input to be logits and will auto apply
323
+ sigmoid per element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``.
324
+ - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``
325
+
326
+
327
+ As output to ``forward`` and ``compute`` the metric returns the following output:
328
+
329
+ - ``br`` (:class:`~torch.Tensor`): If ``multidim_average`` is set to ``global``, the metric returns a scalar
330
+ value. If ``multidim_average`` is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of
331
+ a scalar value per sample.
332
+
333
+ Args:
334
+ threshold: Threshold for transforming probability to binary {0,1} predictions
335
+ multidim_average:
336
+ Defines how additionally dimensions ``...`` should be handled. Should be one of the following:
337
+
338
+ - ``global``: Additional dimensions are flatted along the batch dimension
339
+ - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
340
+ The statistics in this case are calculated over the additional dimensions.
341
+
342
+ ignore_index:
343
+ Specifies a target value that is ignored and does not contribute to the metric calculation
344
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
345
+ Set to ``False`` for faster computations.
346
+
347
+ Example (preds is int tensor):
348
+ >>> from torchmetrics.classification import BinaryRecall
349
+ >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
350
+ >>> preds = torch.tensor([0, 0, 1, 1, 0, 1])
351
+ >>> metric = BinaryRecall()
352
+ >>> metric(preds, target)
353
+ tensor(0.6667)
354
+
355
+ Example (preds is float tensor):
356
+ >>> from torchmetrics.classification import BinaryRecall
357
+ >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
358
+ >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
359
+ >>> metric = BinaryRecall()
360
+ >>> metric(preds, target)
361
+ tensor(0.6667)
362
+
363
+ Example (multidim tensors):
364
+ >>> from torchmetrics.classification import BinaryRecall
365
+ >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
366
+ >>> preds = torch.tensor(
367
+ ... [
368
+ ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
369
+ ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
370
+ ... ]
371
+ ... )
372
+ >>> metric = BinaryRecall(multidim_average='samplewise')
373
+ >>> metric(preds, target)
374
+ tensor([0.6667, 0.0000])
375
+ """
376
+ is_differentiable: bool = False
377
+ higher_is_better: Optional[bool] = True
378
+ full_state_update: bool = False
379
+
380
+ def compute(self) -> Tensor:
381
+ tp, fp, tn, fn = self._final_state()
382
+ return _precision_recall_reduce(
383
+ "recall", tp, fp, tn, fn, average="binary", multidim_average=self.multidim_average
384
+ )
385
+
386
+
387
+ class MulticlassRecall(MulticlassStatScores):
388
+ r"""Computes `Recall`_ for multiclass tasks:
389
+
390
+ .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
391
+
392
+ Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
393
+ false negatives respecitively.
394
+
395
+ As input to ``forward`` and ``update`` the metric accepts the following input:
396
+
397
+ - ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ..)``
398
+ If preds is a floating point we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
399
+ probabilities/logits into an int tensor.
400
+ - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``
401
+
402
+
403
+ As output to ``forward`` and ``compute`` the metric returns the following output:
404
+
405
+ - ``mcr`` (:class:`~torch.Tensor`): The returned shape depends on the ``average`` and ``multidim_average``
406
+ arguments:
407
+
408
+ - If ``multidim_average`` is set to ``global``:
409
+
410
+ - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
411
+ - If ``average=None/'none'``, the shape will be ``(C,)``
412
+
413
+ - If ``multidim_average`` is set to ``samplewise``:
414
+
415
+ - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
416
+ - If ``average=None/'none'``, the shape will be ``(N, C)``
417
+
418
+ Args:
419
+ num_classes: Integer specifing the number of classes
420
+ average:
421
+ Defines the reduction that is applied over labels. Should be one of the following:
422
+
423
+ - ``micro``: Sum statistics over all labels
424
+ - ``macro``: Calculate statistics for each label and average them
425
+ - ``weighted``: Calculates statistics for each label and computes weighted average using their support
426
+ - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction
427
+ top_k:
428
+ Number of highest probability or logit score predictions considered to find the correct label.
429
+ Only works when ``preds`` contain probabilities/logits.
430
+ multidim_average:
431
+ Defines how additionally dimensions ``...`` should be handled. Should be one of the following:
432
+
433
+ - ``global``: Additional dimensions are flatted along the batch dimension
434
+ - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
435
+ The statistics in this case are calculated over the additional dimensions.
436
+
437
+ ignore_index:
438
+ Specifies a target value that is ignored and does not contribute to the metric calculation
439
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
440
+ Set to ``False`` for faster computations.
441
+
442
+ Example (preds is int tensor):
443
+ >>> from torchmetrics.classification import MulticlassRecall
444
+ >>> target = torch.tensor([2, 1, 0, 0])
445
+ >>> preds = torch.tensor([2, 1, 0, 1])
446
+ >>> metric = MulticlassRecall(num_classes=3)
447
+ >>> metric(preds, target)
448
+ tensor(0.8333)
449
+ >>> mcr = MulticlassRecall(num_classes=3, average=None)
450
+ >>> mcr(preds, target)
451
+ tensor([0.5000, 1.0000, 1.0000])
452
+
453
+ Example (preds is float tensor):
454
+ >>> from torchmetrics.classification import MulticlassRecall
455
+ >>> target = torch.tensor([2, 1, 0, 0])
456
+ >>> preds = torch.tensor([
457
+ ... [0.16, 0.26, 0.58],
458
+ ... [0.22, 0.61, 0.17],
459
+ ... [0.71, 0.09, 0.20],
460
+ ... [0.05, 0.82, 0.13],
461
+ ... ])
462
+ >>> metric = MulticlassRecall(num_classes=3)
463
+ >>> metric(preds, target)
464
+ tensor(0.8333)
465
+ >>> mcr = MulticlassRecall(num_classes=3, average=None)
466
+ >>> mcr(preds, target)
467
+ tensor([0.5000, 1.0000, 1.0000])
468
+
469
+ Example (multidim tensors):
470
+ >>> from torchmetrics.classification import MulticlassRecall
471
+ >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
472
+ >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
473
+ >>> metric = MulticlassRecall(num_classes=3, multidim_average='samplewise')
474
+ >>> metric(preds, target)
475
+ tensor([0.5000, 0.2778])
476
+ >>> mcr = MulticlassRecall(num_classes=3, multidim_average='samplewise', average=None)
477
+ >>> mcr(preds, target)
478
+ tensor([[1.0000, 0.0000, 0.5000],
479
+ [0.0000, 0.3333, 0.5000]])
480
+ """
481
+ is_differentiable: bool = False
482
+ higher_is_better: Optional[bool] = True
483
+ full_state_update: bool = False
484
+
485
+ def compute(self) -> Tensor:
486
+ tp, fp, tn, fn = self._final_state()
487
+ return _precision_recall_reduce(
488
+ "recall", tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average
489
+ )
490
+
491
+
492
+ class MultilabelRecall(MultilabelStatScores):
493
+ r"""Computes `Recall`_ for multilabel tasks:
494
+
495
+ .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
496
+
497
+ Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
498
+ false negatives respecitively.
499
+
500
+ As input to ``forward`` and ``update`` the metric accepts the following input:
501
+
502
+ - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating
503
+ point tensor with values outside [0,1] range we consider the input to be logits and will auto apply sigmoid
504
+ per element. Addtionally, we convert to int tensor with thresholding using the value in ``threshold``.
505
+ - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``
506
+
507
+
508
+ As output to ``forward`` and ``compute`` the metric returns the following output:
509
+
510
+ - ``mlr`` (:class:`~torch.Tensor`): The returned shape depends on the ``average`` and ``multidim_average``
511
+ arguments:
512
+
513
+ - If ``multidim_average`` is set to ``global``:
514
+
515
+ - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
516
+ - If ``average=None/'none'``, the shape will be ``(C,)``
517
+
518
+ - If ``multidim_average`` is set to ``samplewise``:
519
+
520
+ - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
521
+ - If ``average=None/'none'``, the shape will be ``(N, C)``
522
+
523
+ Args:
524
+ num_labels: Integer specifing the number of labels
525
+ threshold: Threshold for transforming probability to binary (0,1) predictions
526
+ average:
527
+ Defines the reduction that is applied over labels. Should be one of the following:
528
+
529
+ - ``micro``: Sum statistics over all labels
530
+ - ``macro``: Calculate statistics for each label and average them
531
+ - ``weighted``: Calculates statistics for each label and computes weighted average using their support
532
+ - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction
533
+
534
+ multidim_average:
535
+ Defines how additionally dimensions ``...`` should be handled. Should be one of the following:
536
+
537
+ - ``global``: Additional dimensions are flatted along the batch dimension
538
+ - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
539
+ The statistics in this case are calculated over the additional dimensions.
540
+
541
+ ignore_index:
542
+ Specifies a target value that is ignored and does not contribute to the metric calculation
543
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
544
+ Set to ``False`` for faster computations.
545
+
546
+ Example (preds is int tensor):
547
+ >>> from torchmetrics.classification import MultilabelRecall
548
+ >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
549
+ >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
550
+ >>> metric = MultilabelRecall(num_labels=3)
551
+ >>> metric(preds, target)
552
+ tensor(0.6667)
553
+ >>> mlr = MultilabelRecall(num_labels=3, average=None)
554
+ >>> mlr(preds, target)
555
+ tensor([1., 0., 1.])
556
+
557
+ Example (preds is float tensor):
558
+ >>> from torchmetrics.classification import MultilabelRecall
559
+ >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
560
+ >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
561
+ >>> metric = MultilabelRecall(num_labels=3)
562
+ >>> metric(preds, target)
563
+ tensor(0.6667)
564
+ >>> mlr = MultilabelRecall(num_labels=3, average=None)
565
+ >>> mlr(preds, target)
566
+ tensor([1., 0., 1.])
567
+
568
+ Example (multidim tensors):
569
+ >>> from torchmetrics.classification import MultilabelRecall
570
+ >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
571
+ >>> preds = torch.tensor(
572
+ ... [
573
+ ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
574
+ ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
575
+ ... ]
576
+ ... )
577
+ >>> metric = MultilabelRecall(num_labels=3, multidim_average='samplewise')
578
+ >>> metric(preds, target)
579
+ tensor([0.6667, 0.0000])
580
+ >>> mlr = MultilabelRecall(num_labels=3, multidim_average='samplewise', average=None)
581
+ >>> mlr(preds, target)
582
+ tensor([[1., 1., 0.],
583
+ [0., 0., 0.]])
584
+ """
585
+ is_differentiable: bool = False
586
+ higher_is_better: Optional[bool] = True
587
+ full_state_update: bool = False
588
+
589
+ def compute(self) -> Tensor:
590
+ tp, fp, tn, fn = self._final_state()
591
+ return _precision_recall_reduce(
592
+ "recall", tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average
593
+ )
594
+
595
+
596
+ class Precision:
597
+ r"""Computes `Precision`_:
598
+
599
+ .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}
600
+
601
+ Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
602
+ false positives respecitively.
603
+
604
+ This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
605
+ ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
606
+ :mod:`BinaryPrecision`, :func:`MulticlassPrecision` and :func:`MultilabelPrecision` for the specific details of
607
+ each argument influence and examples.
608
+
609
+ Legacy Example:
610
+ >>> import torch
611
+ >>> preds = torch.tensor([2, 0, 2, 1])
612
+ >>> target = torch.tensor([1, 1, 2, 0])
613
+ >>> precision = Precision(task="multiclass", average='macro', num_classes=3)
614
+ >>> precision(preds, target)
615
+ tensor(0.1667)
616
+ >>> precision = Precision(task="multiclass", average='micro', num_classes=3)
617
+ >>> precision(preds, target)
618
+ tensor(0.2500)
619
+ """
620
+
621
+ def __new__(
622
+ cls,
623
+ task: Literal["binary", "multiclass", "multilabel"],
624
+ threshold: float = 0.5,
625
+ num_classes: Optional[int] = None,
626
+ num_labels: Optional[int] = None,
627
+ average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
628
+ multidim_average: Optional[Literal["global", "samplewise"]] = "global",
629
+ top_k: Optional[int] = 1,
630
+ ignore_index: Optional[int] = None,
631
+ validate_args: bool = True,
632
+ **kwargs: Any,
633
+ ) -> Metric:
634
+ assert multidim_average is not None
635
+ kwargs.update(dict(multidim_average=multidim_average, ignore_index=ignore_index, validate_args=validate_args))
636
+ if task == "binary":
637
+ return BinaryPrecision(threshold, **kwargs)
638
+ if task == "multiclass":
639
+ assert isinstance(num_classes, int)
640
+ assert isinstance(top_k, int)
641
+ return MulticlassPrecision(num_classes, top_k, average, **kwargs)
642
+ if task == "multilabel":
643
+ assert isinstance(num_labels, int)
644
+ return MultilabelPrecision(num_labels, threshold, average, **kwargs)
645
+ raise ValueError(
646
+ f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
647
+ )
648
+
649
+
650
+ class Recall:
651
+ r"""Computes `Recall`_:
652
+
653
+ .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}
654
+
655
+ Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
656
+ false negatives respecitively.
657
+
658
+ This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
659
+ ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
660
+ :mod:`BinaryRecall`, :mod:`MulticlassRecall` and :mod:`MultilabelRecall` for the specific details of
661
+ each argument influence and examples.
662
+
663
+ Legacy Example:
664
+ >>> import torch
665
+ >>> preds = torch.tensor([2, 0, 2, 1])
666
+ >>> target = torch.tensor([1, 1, 2, 0])
667
+ >>> recall = Recall(task="multiclass", average='macro', num_classes=3)
668
+ >>> recall(preds, target)
669
+ tensor(0.3333)
670
+ >>> recall = Recall(task="multiclass", average='micro', num_classes=3)
671
+ >>> recall(preds, target)
672
+ tensor(0.2500)
673
+ """
674
+
675
+ def __new__(
676
+ cls,
677
+ task: Literal["binary", "multiclass", "multilabel"],
678
+ threshold: float = 0.5,
679
+ num_classes: Optional[int] = None,
680
+ num_labels: Optional[int] = None,
681
+ average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
682
+ multidim_average: Optional[Literal["global", "samplewise"]] = "global",
683
+ top_k: Optional[int] = 1,
684
+ ignore_index: Optional[int] = None,
685
+ validate_args: bool = True,
686
+ **kwargs: Any,
687
+ ) -> Metric:
688
+ assert multidim_average is not None
689
+ kwargs.update(dict(multidim_average=multidim_average, ignore_index=ignore_index, validate_args=validate_args))
690
+ if task == "binary":
691
+ return BinaryRecall(threshold, **kwargs)
692
+ if task == "multiclass":
693
+ assert isinstance(num_classes, int)
694
+ assert isinstance(top_k, int)
695
+ return MulticlassRecall(num_classes, top_k, average, **kwargs)
696
+ if task == "multilabel":
697
+ assert isinstance(num_labels, int)
698
+ return MultilabelRecall(num_labels, threshold, average, **kwargs)
699
+ raise ValueError(
700
+ f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
701
+ )
wemm/lib/python3.10/site-packages/torchmetrics/classification/precision_recall_curve.py ADDED
@@ -0,0 +1,489 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, List, Optional, Tuple, Union
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.functional.classification.precision_recall_curve import (
21
+ _adjust_threshold_arg,
22
+ _binary_precision_recall_curve_arg_validation,
23
+ _binary_precision_recall_curve_compute,
24
+ _binary_precision_recall_curve_format,
25
+ _binary_precision_recall_curve_tensor_validation,
26
+ _binary_precision_recall_curve_update,
27
+ _multiclass_precision_recall_curve_arg_validation,
28
+ _multiclass_precision_recall_curve_compute,
29
+ _multiclass_precision_recall_curve_format,
30
+ _multiclass_precision_recall_curve_tensor_validation,
31
+ _multiclass_precision_recall_curve_update,
32
+ _multilabel_precision_recall_curve_arg_validation,
33
+ _multilabel_precision_recall_curve_compute,
34
+ _multilabel_precision_recall_curve_format,
35
+ _multilabel_precision_recall_curve_tensor_validation,
36
+ _multilabel_precision_recall_curve_update,
37
+ )
38
+ from torchmetrics.metric import Metric
39
+ from torchmetrics.utilities.data import dim_zero_cat
40
+
41
+
42
+ class BinaryPrecisionRecallCurve(Metric):
43
+ r"""Computes the precision-recall curve for binary tasks. The curve consist of multiple pairs of precision and
44
+ recall values evaluated at different thresholds, such that the tradeoff between the two values can been seen.
45
+
46
+ As input to ``forward`` and ``update`` the metric accepts the following input:
47
+
48
+ - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)``. Preds should be a tensor containing
49
+ probabilities or logits for each observation. If preds has values outside [0,1] range we consider the input
50
+ to be logits and will auto apply sigmoid per element.
51
+ - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. Target should be a tensor containing
52
+ ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified). The value
53
+ 1 always encodes the positive class.
54
+
55
+ .. note::
56
+ Additional dimension ``...`` will be flattened into the batch dimension.
57
+
58
+ As output to ``forward`` and ``compute`` the metric returns the following output:
59
+
60
+ - ``precision`` (:class:`~torch.Tensor`): if `thresholds=None` a list for each class is returned with an 1d
61
+ tensor of size ``(n_thresholds+1, )`` with precision values (length may differ between classes). If `thresholds`
62
+ is set to something else, then a single 2d tensor of size ``(n_classes, n_thresholds+1)`` with precision values
63
+ is returned.
64
+ - ``recall`` (:class:`~torch.Tensor`): if `thresholds=None` a list for each class is returned with an 1d tensor
65
+ of size ``(n_thresholds+1, )`` with recall values (length may differ between classes). If `thresholds` is set to
66
+ something else, then a single 2d tensor of size ``(n_classes, n_thresholds+1)`` with recall values is returned.
67
+ - ``thresholds`` (:class:`~torch.Tensor`): if `thresholds=None` a list for each class is returned with an 1d
68
+ tensor of size ``(n_thresholds, )`` with increasing threshold values (length may differ between classes). If
69
+ `threshold` is set to something else, then a single 1d tensor of size ``(n_thresholds, )`` is returned with
70
+ shared threshold values for all classes.
71
+
72
+ .. note::
73
+ The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
74
+ that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
75
+ non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
76
+ argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
77
+ size :math:`\mathcal{O}(n_{thresholds})` (constant memory).
78
+
79
+ Args:
80
+ thresholds:
81
+ Can be one of:
82
+
83
+ - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
84
+ all the data. Most accurate but also most memory consuming approach.
85
+ - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
86
+ 0 to 1 as bins for the calculation.
87
+ - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
88
+ - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
89
+ bins for the calculation.
90
+
91
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
92
+ Set to ``False`` for faster computations.
93
+ kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
94
+
95
+ Example:
96
+ >>> from torchmetrics.classification import BinaryPrecisionRecallCurve
97
+ >>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
98
+ >>> target = torch.tensor([0, 1, 1, 0])
99
+ >>> bprc = BinaryPrecisionRecallCurve(thresholds=None)
100
+ >>> bprc(preds, target) # doctest: +NORMALIZE_WHITESPACE
101
+ (tensor([0.6667, 0.5000, 0.0000, 1.0000]),
102
+ tensor([1.0000, 0.5000, 0.0000, 0.0000]),
103
+ tensor([0.5000, 0.7000, 0.8000]))
104
+ >>> bprc = BinaryPrecisionRecallCurve(thresholds=5)
105
+ >>> bprc(preds, target) # doctest: +NORMALIZE_WHITESPACE
106
+ (tensor([0.5000, 0.6667, 0.6667, 0.0000, 0.0000, 1.0000]),
107
+ tensor([1., 1., 1., 0., 0., 0.]),
108
+ tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
109
+ """
110
+ is_differentiable: bool = False
111
+ higher_is_better: Optional[bool] = None
112
+ full_state_update: bool = False
113
+
114
+ def __init__(
115
+ self,
116
+ thresholds: Optional[Union[int, List[float], Tensor]] = None,
117
+ ignore_index: Optional[int] = None,
118
+ validate_args: bool = True,
119
+ **kwargs: Any,
120
+ ) -> None:
121
+ super().__init__(**kwargs)
122
+ if validate_args:
123
+ _binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
124
+
125
+ self.ignore_index = ignore_index
126
+ self.validate_args = validate_args
127
+
128
+ thresholds = _adjust_threshold_arg(thresholds)
129
+ if thresholds is None:
130
+ self.thresholds = thresholds
131
+ self.add_state("preds", default=[], dist_reduce_fx="cat")
132
+ self.add_state("target", default=[], dist_reduce_fx="cat")
133
+ else:
134
+ self.register_buffer("thresholds", thresholds)
135
+ self.add_state(
136
+ "confmat", default=torch.zeros(len(thresholds), 2, 2, dtype=torch.long), dist_reduce_fx="sum"
137
+ )
138
+
139
+ def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
140
+ if self.validate_args:
141
+ _binary_precision_recall_curve_tensor_validation(preds, target, self.ignore_index)
142
+ preds, target, _ = _binary_precision_recall_curve_format(preds, target, self.thresholds, self.ignore_index)
143
+ state = _binary_precision_recall_curve_update(preds, target, self.thresholds)
144
+ if isinstance(state, Tensor):
145
+ self.confmat += state
146
+ else:
147
+ self.preds.append(state[0])
148
+ self.target.append(state[1])
149
+
150
+ def compute(self) -> Tuple[Tensor, Tensor, Tensor]:
151
+ if self.thresholds is None:
152
+ state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)]
153
+ else:
154
+ state = self.confmat
155
+ return _binary_precision_recall_curve_compute(state, self.thresholds)
156
+
157
+
158
+ class MulticlassPrecisionRecallCurve(Metric):
159
+ r"""Computes the precision-recall curve for multiclass tasks. The curve consist of multiple pairs of precision
160
+ and recall values evaluated at different thresholds, such that the tradeoff between the two values can been
161
+ seen.
162
+
163
+ As input to ``forward`` and ``update`` the metric accepts the following input:
164
+
165
+ - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor containing
166
+ probabilities or logits for each observation. If preds has values outside [0,1] range we consider the input to
167
+ be logits and will auto apply softmax per sample.
168
+ - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``. Target should be a tensor containing
169
+ ground truth labels, and therefore only contain values in the [0, n_classes-1] range (except if `ignore_index`
170
+ is specified).
171
+
172
+ .. note::
173
+ Additional dimension ``...`` will be flattened into the batch dimension.
174
+
175
+ As output to ``forward`` and ``compute`` the metric returns the following output:
176
+
177
+ - ``precision`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_thresholds+1, )`` with precision values
178
+ - ``recall`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_thresholds+1, )`` with recall values
179
+ - ``thresholds`` (:class:`~torch.Tensor`): A 1d tensor of size ``(n_thresholds, )`` with increasing threshold values
180
+
181
+ .. note::
182
+ The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
183
+ that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
184
+ non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
185
+ argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
186
+ size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).
187
+
188
+ Args:
189
+ num_classes: Integer specifing the number of classes
190
+ thresholds:
191
+ Can be one of:
192
+
193
+ - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
194
+ all the data. Most accurate but also most memory consuming approach.
195
+ - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
196
+ 0 to 1 as bins for the calculation.
197
+ - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
198
+ - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
199
+ bins for the calculation.
200
+
201
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
202
+ Set to ``False`` for faster computations.
203
+ kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
204
+
205
+ Example:
206
+ >>> from torchmetrics.classification import MulticlassPrecisionRecallCurve
207
+ >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
208
+ ... [0.05, 0.75, 0.05, 0.05, 0.05],
209
+ ... [0.05, 0.05, 0.75, 0.05, 0.05],
210
+ ... [0.05, 0.05, 0.05, 0.75, 0.05]])
211
+ >>> target = torch.tensor([0, 1, 3, 2])
212
+ >>> mcprc = MulticlassPrecisionRecallCurve(num_classes=5, thresholds=None)
213
+ >>> precision, recall, thresholds = mcprc(preds, target)
214
+ >>> precision # doctest: +NORMALIZE_WHITESPACE
215
+ [tensor([1., 1.]), tensor([1., 1.]), tensor([0.2500, 0.0000, 1.0000]),
216
+ tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
217
+ >>> recall
218
+ [tensor([1., 0.]), tensor([1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
219
+ >>> thresholds
220
+ [tensor(0.7500), tensor(0.7500), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor(0.0500)]
221
+ >>> mcprc = MulticlassPrecisionRecallCurve(num_classes=5, thresholds=5)
222
+ >>> mcprc(preds, target) # doctest: +NORMALIZE_WHITESPACE
223
+ (tensor([[0.2500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000],
224
+ [0.2500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000],
225
+ [0.2500, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000],
226
+ [0.2500, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000],
227
+ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000]]),
228
+ tensor([[1., 1., 1., 1., 0., 0.],
229
+ [1., 1., 1., 1., 0., 0.],
230
+ [1., 0., 0., 0., 0., 0.],
231
+ [1., 0., 0., 0., 0., 0.],
232
+ [0., 0., 0., 0., 0., 0.]]),
233
+ tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
234
+ """
235
+ is_differentiable: bool = False
236
+ higher_is_better: Optional[bool] = None
237
+ full_state_update: bool = False
238
+
239
+ def __init__(
240
+ self,
241
+ num_classes: int,
242
+ thresholds: Optional[Union[int, List[float], Tensor]] = None,
243
+ ignore_index: Optional[int] = None,
244
+ validate_args: bool = True,
245
+ **kwargs: Any,
246
+ ) -> None:
247
+ super().__init__(**kwargs)
248
+ if validate_args:
249
+ _multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index)
250
+
251
+ self.num_classes = num_classes
252
+ self.ignore_index = ignore_index
253
+ self.validate_args = validate_args
254
+
255
+ thresholds = _adjust_threshold_arg(thresholds)
256
+ if thresholds is None:
257
+ self.thresholds = thresholds
258
+ self.add_state("preds", default=[], dist_reduce_fx="cat")
259
+ self.add_state("target", default=[], dist_reduce_fx="cat")
260
+ else:
261
+ self.register_buffer("thresholds", thresholds)
262
+ self.add_state(
263
+ "confmat",
264
+ default=torch.zeros(len(thresholds), num_classes, 2, 2, dtype=torch.long),
265
+ dist_reduce_fx="sum",
266
+ )
267
+
268
+ def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
269
+ if self.validate_args:
270
+ _multiclass_precision_recall_curve_tensor_validation(preds, target, self.num_classes, self.ignore_index)
271
+ preds, target, _ = _multiclass_precision_recall_curve_format(
272
+ preds, target, self.num_classes, self.thresholds, self.ignore_index
273
+ )
274
+ state = _multiclass_precision_recall_curve_update(preds, target, self.num_classes, self.thresholds)
275
+ if isinstance(state, Tensor):
276
+ self.confmat += state
277
+ else:
278
+ self.preds.append(state[0])
279
+ self.target.append(state[1])
280
+
281
+ def compute(self) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
282
+ if self.thresholds is None:
283
+ state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)]
284
+ else:
285
+ state = self.confmat
286
+ return _multiclass_precision_recall_curve_compute(state, self.num_classes, self.thresholds)
287
+
288
+
289
class MultilabelPrecisionRecallCurve(Metric):
    r"""Computes the precision-recall curve for multilabel tasks. The curve consist of multiple pairs of precision
    and recall values evaluated at different thresholds, such that the tradeoff between the two values can been
    seen.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor containing
      probabilities or logits for each observation. If preds has values outside [0,1] range we consider the input to
      be logits and will auto apply sigmoid per element.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. Target should be a tensor containing
      ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified).

    .. note::
        Additional dimension ``...`` will be flattened into the batch dimension.

    As output to ``forward`` and ``compute`` the metric returns the following a tuple of either 3 tensors or
    3 lists containing:

    - ``precision`` (:class:`~torch.Tensor` or :class:`~List`): if `thresholds=None` a list for each label is returned
      with an 1d tensor of size ``(n_thresholds+1, )`` with precision values (length may differ between labels). If
      `thresholds` is set to something else, then a single 2d tensor of size ``(n_labels, n_thresholds+1)`` with
      precision values is returned.
    - ``recall`` (:class:`~torch.Tensor` or :class:`~List`): if `thresholds=None` a list for each label is returned
      with an 1d tensor of size ``(n_thresholds+1, )`` with recall values (length may differ between labels). If
      `thresholds` is set to something else, then a single 2d tensor of size ``(n_labels, n_thresholds+1)`` with recall
      values is returned.
    - ``thresholds`` (:class:`~torch.Tensor` or :class:`~List`): if `thresholds=None` a list for each label is
      returned with an 1d tensor of size ``(n_thresholds, )`` with increasing threshold values (length may differ
      between labels). If `threshold` is set to something else, then a single 1d tensor of size ``(n_thresholds, )``
      is returned with shared threshold values for all labels.

    .. note::
        The implementation both supports calculating the metric in a non-binned but accurate version and a binned
        version that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will
        activate the non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the
        `thresholds` argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
        size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).

    Args:
        num_labels: Integer specifing the number of labels
        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
              all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
              0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
              bins for the calculation.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.
        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Example:
        >>> from torchmetrics.classification import MultilabelPrecisionRecallCurve
        >>> preds = torch.tensor([[0.75, 0.05, 0.35],
        ...                       [0.45, 0.75, 0.05],
        ...                       [0.05, 0.55, 0.75],
        ...                       [0.05, 0.65, 0.05]])
        >>> target = torch.tensor([[1, 0, 1],
        ...                        [0, 0, 0],
        ...                        [0, 1, 1],
        ...                        [1, 1, 1]])
        >>> mlprc = MultilabelPrecisionRecallCurve(num_labels=3, thresholds=None)
        >>> precision, recall, thresholds = mlprc(preds, target)
        >>> precision  # doctest: +NORMALIZE_WHITESPACE
        [tensor([0.5000, 0.5000, 1.0000, 1.0000]), tensor([0.6667, 0.5000, 0.0000, 1.0000]),
         tensor([0.7500, 1.0000, 1.0000, 1.0000])]
        >>> recall  # doctest: +NORMALIZE_WHITESPACE
        [tensor([1.0000, 0.5000, 0.5000, 0.0000]), tensor([1.0000, 0.5000, 0.0000, 0.0000]),
         tensor([1.0000, 0.6667, 0.3333, 0.0000])]
        >>> thresholds  # doctest: +NORMALIZE_WHITESPACE
        [tensor([0.0500, 0.4500, 0.7500]), tensor([0.5500, 0.6500, 0.7500]),
         tensor([0.0500, 0.3500, 0.7500])]
        >>> mlprc = MultilabelPrecisionRecallCurve(num_labels=3, thresholds=5)
        >>> mlprc(preds, target)  # doctest: +NORMALIZE_WHITESPACE
        (tensor([[0.5000, 0.5000, 1.0000, 1.0000, 0.0000, 1.0000],
                 [0.5000, 0.6667, 0.6667, 0.0000, 0.0000, 1.0000],
                 [0.7500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000]]),
         tensor([[1.0000, 0.5000, 0.5000, 0.5000, 0.0000, 0.0000],
                 [1.0000, 1.0000, 1.0000, 0.0000, 0.0000, 0.0000],
                 [1.0000, 0.6667, 0.3333, 0.3333, 0.0000, 0.0000]]),
         tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
    """
    is_differentiable: bool = False
    higher_is_better: Optional[bool] = None
    full_state_update: bool = False

    def __init__(
        self,
        num_labels: int,
        thresholds: Optional[Union[int, List[float], Tensor]] = None,
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        if validate_args:
            _multilabel_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index)

        self.num_labels = num_labels
        self.ignore_index = ignore_index
        self.validate_args = validate_args

        # Normalize `thresholds` (int / list / tensor) to either None or a 1d tensor.
        thresholds = _adjust_threshold_arg(thresholds)
        if thresholds is None:
            # Non-binned mode: keep all predictions/targets and compute exact curves in `compute`.
            self.thresholds = thresholds
            self.add_state("preds", default=[], dist_reduce_fx="cat")
            self.add_state("target", default=[], dist_reduce_fx="cat")
        else:
            # Binned mode: thresholds are fixed, so they are stored as a (non-state) buffer and
            # only a constant-size confusion-matrix state per (threshold, label) is accumulated.
            self.register_buffer("thresholds", thresholds)
            self.add_state(
                "confmat",
                default=torch.zeros(len(thresholds), num_labels, 2, 2, dtype=torch.long),
                dist_reduce_fx="sum",
            )

    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
        """Update the metric state with a new batch of predictions and targets."""
        if self.validate_args:
            _multilabel_precision_recall_curve_tensor_validation(preds, target, self.num_labels, self.ignore_index)
        preds, target, _ = _multilabel_precision_recall_curve_format(
            preds, target, self.num_labels, self.thresholds, self.ignore_index
        )
        # The helper returns a confusion-matrix tensor in binned mode and a (preds, target)
        # tuple in non-binned mode; dispatch on the returned type.
        state = _multilabel_precision_recall_curve_update(preds, target, self.num_labels, self.thresholds)
        if isinstance(state, Tensor):
            self.confmat += state
        else:
            self.preds.append(state[0])
            self.target.append(state[1])

    def compute(self) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
        """Compute the precision-recall curve from the accumulated state."""
        if self.thresholds is None:
            state = [dim_zero_cat(self.preds), dim_zero_cat(self.target)]
        else:
            state = self.confmat
        return _multilabel_precision_recall_curve_compute(state, self.num_labels, self.thresholds, self.ignore_index)
429
+
430
+
431
class PrecisionRecallCurve:
    r"""Computes the precision-recall curve. The curve consist of multiple pairs of precision and recall values
    evaluated at different thresholds, such that the tradeoff between the two values can been seen.

    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
    :mod:`BinaryPrecisionRecallCurve`, :mod:`MulticlassPrecisionRecallCurve` and
    :mod:`MultilabelPrecisionRecallCurve` for the specific details of each argument influence and examples.

    Raises:
        ValueError:
            If ``task`` is ``'multiclass'`` and ``num_classes`` is not an integer, if ``task`` is ``'multilabel'``
            and ``num_labels`` is not an integer, or if ``task`` is none of the three supported values.

    Legacy Example:
        >>> pred = torch.tensor([0, 0.1, 0.8, 0.4])
        >>> target = torch.tensor([0, 1, 1, 0])
        >>> pr_curve = PrecisionRecallCurve(task="binary")
        >>> precision, recall, thresholds = pr_curve(pred, target)
        >>> precision
        tensor([0.6667, 0.5000, 1.0000, 1.0000])
        >>> recall
        tensor([1.0000, 0.5000, 0.5000, 0.0000])
        >>> thresholds
        tensor([0.1000, 0.4000, 0.8000])

        >>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
        ...                      [0.05, 0.75, 0.05, 0.05, 0.05],
        ...                      [0.05, 0.05, 0.75, 0.05, 0.05],
        ...                      [0.05, 0.05, 0.05, 0.75, 0.05]])
        >>> target = torch.tensor([0, 1, 3, 2])
        >>> pr_curve = PrecisionRecallCurve(task="multiclass", num_classes=5)
        >>> precision, recall, thresholds = pr_curve(pred, target)
        >>> precision
        [tensor([1., 1.]), tensor([1., 1.]), tensor([0.2500, 0.0000, 1.0000]),
         tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
        >>> recall
        [tensor([1., 0.]), tensor([1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
        >>> thresholds
        [tensor(0.7500), tensor(0.7500), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor(0.0500)]
    """

    def __new__(
        cls,
        task: Literal["binary", "multiclass", "multilabel"],
        thresholds: Optional[Union[int, List[float], Tensor]] = None,
        num_classes: Optional[int] = None,
        num_labels: Optional[int] = None,
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> Metric:
        # The shared arguments are forwarded to whichever task-specific class is instantiated.
        kwargs.update(dict(thresholds=thresholds, ignore_index=ignore_index, validate_args=validate_args))
        if task == "binary":
            return BinaryPrecisionRecallCurve(**kwargs)
        if task == "multiclass":
            # Validate with an explicit raise instead of `assert`: asserts are stripped when
            # Python runs with `-O`, which would turn a bad argument into an obscure error later.
            if not isinstance(num_classes, int):
                raise ValueError(
                    f"Expected argument `num_classes` to be an `int` when `task='multiclass'`"
                    f" but got {type(num_classes)}"
                )
            return MulticlassPrecisionRecallCurve(num_classes, **kwargs)
        if task == "multilabel":
            if not isinstance(num_labels, int):
                raise ValueError(
                    f"Expected argument `num_labels` to be an `int` when `task='multilabel'`"
                    f" but got {type(num_labels)}"
                )
            return MultilabelPrecisionRecallCurve(num_labels, **kwargs)
        raise ValueError(
            f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
        )
wemm/lib/python3.10/site-packages/torchmetrics/classification/ranking.py ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, Optional
15
+
16
+ import torch
17
+ from torch import Tensor
18
+
19
+ from torchmetrics.functional.classification.ranking import (
20
+ _multilabel_confusion_matrix_arg_validation,
21
+ _multilabel_confusion_matrix_format,
22
+ _multilabel_coverage_error_update,
23
+ _multilabel_ranking_average_precision_update,
24
+ _multilabel_ranking_loss_update,
25
+ _multilabel_ranking_tensor_validation,
26
+ _ranking_reduce,
27
+ )
28
+ from torchmetrics.metric import Metric
29
+
30
+
31
class MultilabelCoverageError(Metric):
    """Computes `Multilabel coverage error`_.

    The score measures how far we need to go through the ranked scores to cover all true labels. The best value is
    equal to the average number of labels in the target tensor per sample.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor
      containing probabilities or logits for each observation. If preds has values outside [0,1] range we consider
      the input to be logits and will auto apply sigmoid per element.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. Target should be a tensor
      containing ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified).

    .. note::
        Additional dimension ``...`` will be flattened into the batch dimension.

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mlce`` (:class:`~torch.Tensor`): A tensor containing the multilabel coverage error.

    Args:
        num_labels: Integer specifing the number of labels
        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example:
        >>> from torchmetrics.classification import MultilabelCoverageError
        >>> _ = torch.manual_seed(42)
        >>> preds = torch.rand(10, 5)
        >>> target = torch.randint(2, (10, 5))
        >>> mlce = MultilabelCoverageError(num_labels=5)
        >>> mlce(preds, target)
        tensor(3.9000)
    """

    higher_is_better: bool = False
    is_differentiable: bool = False
    full_state_update: bool = False

    def __init__(
        self,
        num_labels: int,
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        if validate_args:
            _multilabel_confusion_matrix_arg_validation(num_labels, threshold=0.0, ignore_index=ignore_index)
        self.num_labels = num_labels
        self.ignore_index = ignore_index
        self.validate_args = validate_args
        # Running sum of per-batch scores and of the number of contributing elements.
        self.add_state("measure", default=torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")

    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
        """Accumulate the coverage-error statistics for one batch of predictions and targets."""
        if self.validate_args:
            _multilabel_ranking_tensor_validation(preds, target, self.num_labels, self.ignore_index)
        # Thresholding is disabled here: ranking metrics consume raw scores.
        formatted_preds, formatted_target = _multilabel_confusion_matrix_format(
            preds, target, self.num_labels, threshold=0.0, ignore_index=self.ignore_index, should_threshold=False
        )
        batch_measure, batch_elements = _multilabel_coverage_error_update(formatted_preds, formatted_target)
        self.measure += batch_measure
        self.total += batch_elements

    def compute(self) -> Tensor:
        """Return the coverage error averaged over all batches seen so far."""
        return _ranking_reduce(self.measure, self.total)
99
+
100
+
101
class MultilabelRankingAveragePrecision(Metric):
    """Computes label ranking average precision score for multilabel data [1].

    The score is the average over each ground truth label assigned to each sample of the ratio of true vs. total
    labels with lower score. Best score is 1.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor
      containing probabilities or logits for each observation. If preds has values outside [0,1] range we consider
      the input to be logits and will auto apply sigmoid per element.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. Target should be a tensor
      containing ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified).

    .. note::
        Additional dimension ``...`` will be flattened into the batch dimension.

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mlrap`` (:class:`~torch.Tensor`): A tensor containing the multilabel ranking average precision.

    Args:
        num_labels: Integer specifing the number of labels
        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example:
        >>> from torchmetrics.classification import MultilabelRankingAveragePrecision
        >>> _ = torch.manual_seed(42)
        >>> preds = torch.rand(10, 5)
        >>> target = torch.randint(2, (10, 5))
        >>> mlrap = MultilabelRankingAveragePrecision(num_labels=5)
        >>> mlrap(preds, target)
        tensor(0.7744)
    """

    higher_is_better: bool = True
    is_differentiable: bool = False
    full_state_update: bool = False

    def __init__(
        self,
        num_labels: int,
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        if validate_args:
            _multilabel_confusion_matrix_arg_validation(num_labels, threshold=0.0, ignore_index=ignore_index)
        self.num_labels = num_labels
        self.ignore_index = ignore_index
        self.validate_args = validate_args
        # Running sum of per-batch scores and of the number of contributing elements.
        self.add_state("measure", default=torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")

    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
        """Accumulate the ranking-average-precision statistics for one batch."""
        if self.validate_args:
            _multilabel_ranking_tensor_validation(preds, target, self.num_labels, self.ignore_index)
        # Thresholding is disabled here: ranking metrics consume raw scores.
        formatted_preds, formatted_target = _multilabel_confusion_matrix_format(
            preds, target, self.num_labels, threshold=0.0, ignore_index=self.ignore_index, should_threshold=False
        )
        batch_measure, batch_elements = _multilabel_ranking_average_precision_update(
            formatted_preds, formatted_target
        )
        self.measure += batch_measure
        self.total += batch_elements

    def compute(self) -> Tensor:
        """Return the ranking average precision averaged over all batches seen so far."""
        return _ranking_reduce(self.measure, self.total)
170
+
171
+
172
class MultilabelRankingLoss(Metric):
    """Computes the label ranking loss for multilabel data [1].

    The score corresponds to the average number of label pairs that are incorrectly ordered given some predictions,
    weighted by the size of the label set and the number of labels not in the label set. The best score is 0.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)``. Preds should be a tensor
      containing probabilities or logits for each observation. If preds has values outside [0,1] range we consider
      the input to be logits and will auto apply sigmoid per element.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``. Target should be a tensor
      containing ground truth labels, and therefore only contain {0,1} values (except if `ignore_index` is specified).

    .. note::
        Additional dimension ``...`` will be flattened into the batch dimension.

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mlrl`` (:class:`~torch.Tensor`): A tensor containing the multilabel ranking loss.

    Args:
        num_labels: Integer specifing the number of labels
        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example:
        >>> from torchmetrics.classification import MultilabelRankingLoss
        >>> _ = torch.manual_seed(42)
        >>> preds = torch.rand(10, 5)
        >>> target = torch.randint(2, (10, 5))
        >>> mlrl = MultilabelRankingLoss(num_labels=5)
        >>> mlrl(preds, target)
        tensor(0.4167)
    """

    higher_is_better: bool = False
    is_differentiable: bool = False
    full_state_update: bool = False

    def __init__(
        self,
        num_labels: int,
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        if validate_args:
            _multilabel_confusion_matrix_arg_validation(num_labels, threshold=0.0, ignore_index=ignore_index)
        self.num_labels = num_labels
        self.ignore_index = ignore_index
        self.validate_args = validate_args
        # Running sum of per-batch scores and of the number of contributing elements.
        self.add_state("measure", default=torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")

    def update(self, preds: Tensor, target: Tensor) -> None:  # type: ignore
        """Accumulate the ranking-loss statistics for one batch of predictions and targets."""
        if self.validate_args:
            _multilabel_ranking_tensor_validation(preds, target, self.num_labels, self.ignore_index)
        # Thresholding is disabled here: ranking metrics consume raw scores.
        formatted_preds, formatted_target = _multilabel_confusion_matrix_format(
            preds, target, self.num_labels, threshold=0.0, ignore_index=self.ignore_index, should_threshold=False
        )
        batch_measure, batch_elements = _multilabel_ranking_loss_update(formatted_preds, formatted_target)
        self.measure += batch_measure
        self.total += batch_elements

    def compute(self) -> Tensor:
        """Return the ranking loss averaged over all batches seen so far."""
        return _ranking_reduce(self.measure, self.total)
wemm/lib/python3.10/site-packages/torchmetrics/collections.py ADDED
@@ -0,0 +1,483 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # this is just a bypass for this module name collision with build-in one
15
+ from collections import OrderedDict
16
+ from copy import deepcopy
17
+ from typing import Any, Dict, Hashable, Iterable, List, Optional, Sequence, Tuple, Union
18
+
19
+ import torch
20
+ from torch import Tensor
21
+ from torch.nn import Module, ModuleDict
22
+
23
+ from torchmetrics.metric import Metric
24
+ from torchmetrics.utilities import rank_zero_warn
25
+ from torchmetrics.utilities.data import _flatten_dict, allclose
26
+
27
+
28
+ class MetricCollection(ModuleDict):
29
+ """MetricCollection class can be used to chain metrics that have the same call pattern into one single class.
30
+
31
+ Args:
32
+ metrics: One of the following
33
+
34
+ * list or tuple (sequence): if metrics are passed in as a list or tuple, will use the metrics class name
35
+ as key for output dict. Therefore, two metrics of the same class cannot be chained this way.
36
+
37
+ * arguments: similar to passing in as a list, metrics passed in as arguments will use their metric
38
+ class name as key for the output dict.
39
+
40
+ * dict: if metrics are passed in as a dict, will use each key in the dict as key for output dict.
41
+ Use this format if you want to chain together multiple of the same metric with different parameters.
42
+ Note that the keys in the output dict will be sorted alphabetically.
43
+
44
+ prefix: a string to append in front of the keys of the output dict
45
+
46
+ postfix: a string to append after the keys of the output dict
47
+
48
+ compute_groups:
49
+ By default the MetricCollection will try to reduce the computations needed for the metrics in the collection
50
+ by checking if they belong to the same **compute group**. All metrics in a compute group share the same
51
+ metric state and are therefore only different in their compute step e.g. accuracy, precision and recall
52
+ can all be computed from the true positives/negatives and false positives/negatives. By default,
53
+ this argument is ``True`` which enables this feature. Set this argument to `False` for disabling
54
+ this behaviour. Can also be set to a list of lists of metrics for setting the compute groups yourself.
55
+
56
+ .. note::
57
+ The compute groups feature can significatly speedup the calculation of metrics under the right conditions.
58
+ First, the feature is only available when calling the ``update`` method and not when calling ``forward`` method
59
+ due to the internal logic of ``forward`` preventing this. Secondly, since we compute groups share metric
60
+ states by reference, calling ``.items()``, ``.values()`` etc. on the metric collection will break this
61
+ reference and a copy of states are instead returned in this case (reference will be reestablished on the next
62
+ call to ``update``).
63
+
64
+ .. note::
65
+ Metric collections can be nested at initilization (see last example) but the output of the collection will
66
+ still be a single flatten dictionary combining the prefix and postfix arguments from the nested collection.
67
+
68
+ Raises:
69
+ ValueError:
70
+ If one of the elements of ``metrics`` is not an instance of ``pl.metrics.Metric``.
71
+ ValueError:
72
+ If two elements in ``metrics`` have the same ``name``.
73
+ ValueError:
74
+ If ``metrics`` is not a ``list``, ``tuple`` or a ``dict``.
75
+ ValueError:
76
+ If ``metrics`` is ``dict`` and additional_metrics are passed in.
77
+ ValueError:
78
+ If ``prefix`` is set and it is not a string.
79
+ ValueError:
80
+ If ``postfix`` is set and it is not a string.
81
+
82
+ Example (input as list):
83
+ >>> import torch
84
+ >>> from pprint import pprint
85
+ >>> from torchmetrics import MetricCollection, MeanSquaredError
86
+ >>> from torchmetrics.classification import MulticlassAccuracy, MulticlassPrecision, MulticlassRecall
87
+ >>> target = torch.tensor([0, 2, 0, 2, 0, 1, 0, 2])
88
+ >>> preds = torch.tensor([2, 1, 2, 0, 1, 2, 2, 2])
89
+ >>> metrics = MetricCollection([MulticlassAccuracy(num_classes=3, average='micro'),
90
+ ... MulticlassPrecision(num_classes=3, average='macro'),
91
+ ... MulticlassRecall(num_classes=3, average='macro')])
92
+ >>> metrics(preds, target) # doctest: +NORMALIZE_WHITESPACE
93
+ {'MulticlassAccuracy': tensor(0.1250),
94
+ 'MulticlassPrecision': tensor(0.0667),
95
+ 'MulticlassRecall': tensor(0.1111)}
96
+
97
+ Example (input as arguments):
98
+ >>> metrics = MetricCollection(MulticlassAccuracy(num_classes=3, average='micro'),
99
+ ... MulticlassPrecision(num_classes=3, average='macro'),
100
+ ... MulticlassRecall(num_classes=3, average='macro'))
101
+ >>> metrics(preds, target) # doctest: +NORMALIZE_WHITESPACE
102
+ {'MulticlassAccuracy': tensor(0.1250),
103
+ 'MulticlassPrecision': tensor(0.0667),
104
+ 'MulticlassRecall': tensor(0.1111)}
105
+
106
+ Example (input as dict):
107
+ >>> metrics = MetricCollection({'micro_recall': MulticlassRecall(num_classes=3, average='micro'),
108
+ ... 'macro_recall': MulticlassRecall(num_classes=3, average='macro')})
109
+ >>> same_metric = metrics.clone()
110
+ >>> pprint(metrics(preds, target))
111
+ {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)}
112
+ >>> pprint(same_metric(preds, target))
113
+ {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)}
114
+
115
+ Example (specification of compute groups):
116
+ >>> metrics = MetricCollection(
117
+ ... MulticlassRecall(num_classes=3, average='macro'),
118
+ ... MulticlassPrecision(num_classes=3, average='macro'),
119
+ ... MeanSquaredError(),
120
+ ... compute_groups=[['MulticlassRecall', 'MulticlassPrecision'], ['MeanSquaredError']]
121
+ ... )
122
+ >>> metrics.update(preds, target)
123
+ >>> pprint(metrics.compute())
124
+ {'MeanSquaredError': tensor(2.3750), 'MulticlassPrecision': tensor(0.0667), 'MulticlassRecall': tensor(0.1111)}
125
+ >>> pprint(metrics.compute_groups)
126
+ {0: ['MulticlassRecall', 'MulticlassPrecision'], 1: ['MeanSquaredError']}
127
+
128
+ Example (nested metric collections):
129
+ >>> metrics = MetricCollection([
130
+ ... MetricCollection([
131
+ ... MulticlassAccuracy(num_classes=3, average='macro'),
132
+ ... MulticlassPrecision(num_classes=3, average='macro')
133
+ ... ], postfix='_macro'),
134
+ ... MetricCollection([
135
+ ... MulticlassAccuracy(num_classes=3, average='micro'),
136
+ ... MulticlassPrecision(num_classes=3, average='micro')
137
+ ... ], postfix='_micro'),
138
+ ... ], prefix='valmetrics/')
139
+ >>> pprint(metrics(preds, target)) # doctest: +NORMALIZE_WHITESPACE
140
+ {'valmetrics/MulticlassAccuracy_macro': tensor(0.1111),
141
+ 'valmetrics/MulticlassAccuracy_micro': tensor(0.1250),
142
+ 'valmetrics/MulticlassPrecision_macro': tensor(0.0667),
143
+ 'valmetrics/MulticlassPrecision_micro': tensor(0.1250)}
144
+ """
145
+
146
+ _groups: Dict[int, List[str]]
147
+
148
+ def __init__(
149
+ self,
150
+ metrics: Union[Metric, Sequence[Metric], Dict[str, Metric]],
151
+ *additional_metrics: Metric,
152
+ prefix: Optional[str] = None,
153
+ postfix: Optional[str] = None,
154
+ compute_groups: Union[bool, List[List[str]]] = True,
155
+ ) -> None:
156
+ super().__init__()
157
+
158
+ self.prefix = self._check_arg(prefix, "prefix")
159
+ self.postfix = self._check_arg(postfix, "postfix")
160
+ self._enable_compute_groups = compute_groups
161
+ self._groups_checked: bool = False
162
+ self._state_is_copy: bool = False
163
+
164
+ self.add_metrics(metrics, *additional_metrics)
165
+
166
+ @torch.jit.unused
167
+ def forward(self, *args: Any, **kwargs: Any) -> Dict[str, Any]:
168
+ """Iteratively call forward for each metric.
169
+
170
+ Positional arguments (args) will be passed to every metric in the collection, while keyword arguments (kwargs)
171
+ will be filtered based on the signature of the individual metric.
172
+ """
173
+ res = {k: m(*args, **m._filter_kwargs(**kwargs)) for k, m in self.items(keep_base=True, copy_state=False)}
174
+ res = _flatten_dict(res)
175
+ return {self._set_name(k): v for k, v in res.items()}
176
+
177
    def update(self, *args: Any, **kwargs: Any) -> None:
        """Iteratively call update for each metric.

        Positional arguments (args) will be passed to every metric in the collection, while keyword arguments (kwargs)
        will be filtered based on the signature of the individual metric.
        """
        # Use compute groups if already initialized and checked
        if self._groups_checked:
            for _, cg in self._groups.items():
                # Only the first member of each compute group is updated; the other
                # members share its state tensors by reference and therefore see the
                # same data without redundant computation.
                m0 = getattr(self, cg[0])
                m0.update(*args, **m0._filter_kwargs(**kwargs))
            if self._state_is_copy:
                # If we have deep copied state inbetween updates, reestablish link
                self._compute_groups_create_state_ref()
                self._state_is_copy = False
        else:  # the first update always do per metric to form compute groups
            for _, m in self.items(keep_base=True, copy_state=False):
                m_kwargs = m._filter_kwargs(**kwargs)
                m.update(*args, **m_kwargs)

            if self._enable_compute_groups:
                # After the first real update every state carries data, so metrics
                # with identical states can be detected and merged into groups.
                self._merge_compute_groups()
                # create reference between states
                self._compute_groups_create_state_ref()
            self._groups_checked = True
+
204
    def _merge_compute_groups(self) -> None:
        """Iterates over the collection of metrics, checking if the state of each metric matches another.

        If so, their compute groups will be merged into one. The complexity of the method is approximately
        ``O(number_of_metrics_in_collection ** 2)``, as all metrics need to be compared to all other metrics.
        """
        n_groups = len(self._groups)
        while True:
            # Iterate over deep-copied snapshots so ``self._groups`` can be mutated
            # (pop/extend) while looping without invalidating the iterators.
            for cg_idx1, cg_members1 in deepcopy(self._groups).items():
                for cg_idx2, cg_members2 in deepcopy(self._groups).items():
                    if cg_idx1 == cg_idx2:
                        continue

                    # Comparing the first member of each group suffices because all
                    # members of an existing group already have equal state.
                    metric1 = getattr(self, cg_members1[0])
                    metric2 = getattr(self, cg_members2[0])

                    if self._equal_metric_states(metric1, metric2):
                        self._groups[cg_idx1].extend(self._groups.pop(cg_idx2))
                        break

                # Start over if we merged groups
                if len(self._groups) != n_groups:
                    break

            # Stop when we iterate over everything and do not merge any groups
            if len(self._groups) == n_groups:
                break
            else:
                n_groups = len(self._groups)

        # Re-index groups so keys are consecutive integers starting at 0
        temp = deepcopy(self._groups)
        self._groups = {}
        for idx, values in enumerate(temp.values()):
            self._groups[idx] = values
+
240
    @staticmethod
    def _equal_metric_states(metric1: Metric, metric2: Metric) -> bool:
        """Check if the metric state of two metrics are the same."""
        # Metrics with no registered state can never share a compute group.
        if len(metric1._defaults) == 0 or len(metric2._defaults) == 0:
            return False

        if metric1._defaults.keys() != metric2._defaults.keys():
            return False

        for key in metric1._defaults.keys():
            state1 = getattr(metric1, key)
            state2 = getattr(metric2, key)

            if type(state1) != type(state2):
                return False

            # NOTE(review): for Tensor and list states this returns based on the
            # FIRST state key only — later keys are never compared. Upstream
            # behavior preserved here; confirm intent before changing.
            if isinstance(state1, Tensor) and isinstance(state2, Tensor):
                return state1.shape == state2.shape and allclose(state1, state2)

            if isinstance(state1, list) and isinstance(state2, list):
                # zip stops at the shorter list; equal lengths are assumed here —
                # TODO confirm states of equal length is an invariant.
                return all(s1.shape == s2.shape and allclose(s1, s2) for s1, s2 in zip(state1, state2))

        return True
+
265
    def _compute_groups_create_state_ref(self, copy: bool = False) -> None:
        """Create reference between metrics in the same compute group.

        Args:
            copy: If `True` the metric state between members will be copied instead
                of just passed by reference
        """
        # Only (re)establish links while the state is still shared by reference; if
        # it was already deep-copied, members own their state and must not be relinked.
        if not self._state_is_copy:
            for _, cg in self._groups.items():
                m0 = getattr(self, cg[0])
                # Every member after the first mirrors (or copies) the state of m0.
                for i in range(1, len(cg)):
                    mi = getattr(self, cg[i])
                    for state in m0._defaults:
                        m0_state = getattr(m0, state)
                        # Determine if we just should set a reference or a full copy
                        setattr(mi, state, deepcopy(m0_state) if copy else m0_state)
                    setattr(mi, "_update_count", deepcopy(m0._update_count) if copy else m0._update_count)
        self._state_is_copy = copy
+
284
+ def compute(self) -> Dict[str, Any]:
285
+ """Compute the result for each metric in the collection."""
286
+ res = {k: m.compute() for k, m in self.items(keep_base=True, copy_state=False)}
287
+ res = _flatten_dict(res)
288
+ return {self._set_name(k): v for k, v in res.items()}
289
+
290
+ def reset(self) -> None:
291
+ """Iteratively call reset for each metric."""
292
+ for _, m in self.items(keep_base=True, copy_state=False):
293
+ m.reset()
294
+ if self._enable_compute_groups and self._groups_checked:
295
+ # reset state reference
296
+ self._compute_groups_create_state_ref()
297
+
298
+ def clone(self, prefix: Optional[str] = None, postfix: Optional[str] = None) -> "MetricCollection":
299
+ """Make a copy of the metric collection
300
+ Args:
301
+ prefix: a string to append in front of the metric keys
302
+ postfix: a string to append after the keys of the output dict
303
+
304
+ """
305
+ mc = deepcopy(self)
306
+ if prefix:
307
+ mc.prefix = self._check_arg(prefix, "prefix")
308
+ if postfix:
309
+ mc.postfix = self._check_arg(postfix, "postfix")
310
+ return mc
311
+
312
+ def persistent(self, mode: bool = True) -> None:
313
+ """Method for post-init to change if metric states should be saved to its state_dict."""
314
+ for _, m in self.items(keep_base=True, copy_state=False):
315
+ m.persistent(mode)
316
+
317
+ def add_metrics(
318
+ self, metrics: Union[Metric, Sequence[Metric], Dict[str, Metric]], *additional_metrics: Metric
319
+ ) -> None:
320
+ """Add new metrics to Metric Collection."""
321
+ if isinstance(metrics, Metric):
322
+ # set compatible with original type expectations
323
+ metrics = [metrics]
324
+ if isinstance(metrics, Sequence):
325
+ # prepare for optional additions
326
+ metrics = list(metrics)
327
+ remain: list = []
328
+ for m in additional_metrics:
329
+ (metrics if isinstance(m, Metric) else remain).append(m)
330
+
331
+ if remain:
332
+ rank_zero_warn(
333
+ f"You have passes extra arguments {remain} which are not `Metric` so they will be ignored."
334
+ )
335
+ elif additional_metrics:
336
+ raise ValueError(
337
+ f"You have passes extra arguments {additional_metrics} which are not compatible"
338
+ f" with first passed dictionary {metrics} so they will be ignored."
339
+ )
340
+
341
+ if isinstance(metrics, dict):
342
+ # Check all values are metrics
343
+ # Make sure that metrics are added in deterministic order
344
+ for name in sorted(metrics.keys()):
345
+ metric = metrics[name]
346
+ if not isinstance(metric, (Metric, MetricCollection)):
347
+ raise ValueError(
348
+ f"Value {metric} belonging to key {name} is not an instance of"
349
+ " `torchmetrics.Metric` or `torchmetrics.MetricCollection`"
350
+ )
351
+ if isinstance(metric, Metric):
352
+ self[name] = metric
353
+ else:
354
+ for k, v in metric.items(keep_base=False):
355
+ self[f"{name}_{k}"] = v
356
+ elif isinstance(metrics, Sequence):
357
+ for metric in metrics:
358
+ if not isinstance(metric, (Metric, MetricCollection)):
359
+ raise ValueError(
360
+ f"Input {metric} to `MetricCollection` is not a instance of"
361
+ " `torchmetrics.Metric` or `torchmetrics.MetricCollection`"
362
+ )
363
+ if isinstance(metric, Metric):
364
+ name = metric.__class__.__name__
365
+ if name in self:
366
+ raise ValueError(f"Encountered two metrics both named {name}")
367
+ self[name] = metric
368
+ else:
369
+ for k, v in metric.items(keep_base=False):
370
+ self[k] = v
371
+ else:
372
+ raise ValueError("Unknown input to MetricCollection.")
373
+
374
+ self._groups_checked = False
375
+ if self._enable_compute_groups:
376
+ self._init_compute_groups()
377
+ else:
378
+ self._groups = {}
379
+
380
+ def _init_compute_groups(self) -> None:
381
+ """Initialize compute groups.
382
+
383
+ If user provided a list, we check that all metrics in the list are also in the collection. If set to `True` we
384
+ simply initialize each metric in the collection as its own group
385
+ """
386
+ if isinstance(self._enable_compute_groups, list):
387
+ self._groups = {i: k for i, k in enumerate(self._enable_compute_groups)}
388
+ for v in self._groups.values():
389
+ for metric in v:
390
+ if metric not in self:
391
+ raise ValueError(
392
+ f"Input {metric} in `compute_groups` argument does not match a metric in the collection."
393
+ f" Please make sure that {self._enable_compute_groups} matches {self.keys(keep_base=True)}"
394
+ )
395
+ self._groups_checked = True
396
+ else:
397
+ # Initialize all metrics as their own compute group
398
+ self._groups = {i: [str(k)] for i, k in enumerate(self.keys(keep_base=True))}
399
+
400
    @property
    def compute_groups(self) -> Dict[int, List[str]]:
        """Return a dict with the current compute groups in the collection.

        Maps group index to the list of metric names whose states are shared.
        """
        return self._groups
+
405
+ def _set_name(self, base: str) -> str:
406
+ """Adjust name of metric with both prefix and postfix."""
407
+ name = base if self.prefix is None else self.prefix + base
408
+ name = name if self.postfix is None else name + self.postfix
409
+ return name
410
+
411
+ def _to_renamed_ordered_dict(self) -> OrderedDict:
412
+ od = OrderedDict()
413
+ for k, v in self._modules.items():
414
+ od[self._set_name(k)] = v
415
+ return od
416
+
417
+ def keys(self, keep_base: bool = False) -> Iterable[Hashable]:
418
+ r"""Return an iterable of the ModuleDict key.
419
+
420
+ Args:
421
+ keep_base: Whether to add prefix/postfix on the items collection.
422
+ """
423
+ if keep_base:
424
+ return self._modules.keys()
425
+ return self._to_renamed_ordered_dict().keys()
426
+
427
+ def items(self, keep_base: bool = False, copy_state: bool = True) -> Iterable[Tuple[str, Module]]:
428
+ r"""Return an iterable of the ModuleDict key/value pairs.
429
+
430
+ Args:
431
+ keep_base: Whether to add prefix/postfix on the collection.
432
+ copy_state:
433
+ If metric states should be copied between metrics in the same compute group or just passed by reference
434
+ """
435
+ self._compute_groups_create_state_ref(copy_state)
436
+ if keep_base:
437
+ return self._modules.items()
438
+ return self._to_renamed_ordered_dict().items()
439
+
440
+ def values(self, copy_state: bool = True) -> Iterable[Module]:
441
+ """Return an iterable of the ModuleDict values.
442
+
443
+ Args:
444
+ copy_state:
445
+ If metric states should be copied between metrics in the same compute group or just passed by reference
446
+ """
447
+ self._compute_groups_create_state_ref(copy_state)
448
+ return self._modules.values()
449
+
450
    def __getitem__(self, key: str, copy_state: bool = True) -> Module:
        """Retrieve a single metric from the collection.

        Note: lookup uses the BASE key (no prefix/postfix applied).

        Args:
            key: name of metric to retrieve
            copy_state:
                If metric states should be copied between metrics in the same compute group or just passed by reference

        Raises:
            KeyError: if ``key`` does not name a metric in the collection.
        """
        # Ensure compute-group members expose consistent (shared or copied) state.
        self._compute_groups_create_state_ref(copy_state)
        return self._modules[key]
+
461
+ @staticmethod
462
+ def _check_arg(arg: Optional[str], name: str) -> Optional[str]:
463
+ if arg is None or isinstance(arg, str):
464
+ return arg
465
+ raise ValueError(f"Expected input `{name}` to be a string, but got {type(arg)}")
466
+
467
    def __repr__(self) -> str:
        """Return the ModuleDict repr, extended with prefix/postfix info when set."""
        # Strip the final two characters (presumably the closing "\n)" of the parent
        # repr — re-appended below) so extra fields can be inserted before it.
        repr_str = super().__repr__()[:-2]
        if self.prefix:
            repr_str += f",\n prefix={self.prefix}{',' if self.postfix else ''}"
        if self.postfix:
            repr_str += f"{',' if not self.prefix else ''}\n postfix={self.postfix}"
        return repr_str + "\n)"
+
475
+ def set_dtype(self, dst_type: Union[str, torch.dtype]) -> "MetricCollection":
476
+ """Transfer all metric state to specific dtype. Special version of standard `type` method.
477
+
478
+ Arguments:
479
+ dst_type (type or string): the desired type.
480
+ """
481
+ for _, m in self.items(keep_base=True, copy_state=False):
482
+ m.set_dtype(dst_type)
483
+ return self
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__init__.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from torchmetrics.functional.classification.accuracy import ( # noqa: F401
15
+ accuracy,
16
+ binary_accuracy,
17
+ multiclass_accuracy,
18
+ multilabel_accuracy,
19
+ )
20
+ from torchmetrics.functional.classification.auroc import ( # noqa: F401
21
+ auroc,
22
+ binary_auroc,
23
+ multiclass_auroc,
24
+ multilabel_auroc,
25
+ )
26
+ from torchmetrics.functional.classification.average_precision import ( # noqa: F401
27
+ average_precision,
28
+ binary_average_precision,
29
+ multiclass_average_precision,
30
+ multilabel_average_precision,
31
+ )
32
+ from torchmetrics.functional.classification.calibration_error import ( # noqa: F401
33
+ binary_calibration_error,
34
+ calibration_error,
35
+ multiclass_calibration_error,
36
+ )
37
+ from torchmetrics.functional.classification.cohen_kappa import ( # noqa: F401
38
+ binary_cohen_kappa,
39
+ cohen_kappa,
40
+ multiclass_cohen_kappa,
41
+ )
42
+ from torchmetrics.functional.classification.confusion_matrix import ( # noqa: F401
43
+ binary_confusion_matrix,
44
+ confusion_matrix,
45
+ multiclass_confusion_matrix,
46
+ multilabel_confusion_matrix,
47
+ )
48
+ from torchmetrics.functional.classification.dice import dice # noqa: F401
49
+ from torchmetrics.functional.classification.exact_match import ( # noqa: F401
50
+ exact_match,
51
+ multiclass_exact_match,
52
+ multilabel_exact_match,
53
+ )
54
+ from torchmetrics.functional.classification.f_beta import ( # noqa: F401
55
+ binary_f1_score,
56
+ binary_fbeta_score,
57
+ f1_score,
58
+ fbeta_score,
59
+ multiclass_f1_score,
60
+ multiclass_fbeta_score,
61
+ multilabel_f1_score,
62
+ multilabel_fbeta_score,
63
+ )
64
+ from torchmetrics.functional.classification.hamming import ( # noqa: F401
65
+ binary_hamming_distance,
66
+ hamming_distance,
67
+ multiclass_hamming_distance,
68
+ multilabel_hamming_distance,
69
+ )
70
+ from torchmetrics.functional.classification.hinge import ( # noqa: F401
71
+ binary_hinge_loss,
72
+ hinge_loss,
73
+ multiclass_hinge_loss,
74
+ )
75
+ from torchmetrics.functional.classification.jaccard import ( # noqa: F401
76
+ binary_jaccard_index,
77
+ jaccard_index,
78
+ multiclass_jaccard_index,
79
+ multilabel_jaccard_index,
80
+ )
81
+ from torchmetrics.functional.classification.matthews_corrcoef import ( # noqa: F401
82
+ binary_matthews_corrcoef,
83
+ matthews_corrcoef,
84
+ multiclass_matthews_corrcoef,
85
+ multilabel_matthews_corrcoef,
86
+ )
87
+ from torchmetrics.functional.classification.precision_recall import ( # noqa: F401
88
+ binary_precision,
89
+ binary_recall,
90
+ multiclass_precision,
91
+ multiclass_recall,
92
+ multilabel_precision,
93
+ multilabel_recall,
94
+ precision,
95
+ recall,
96
+ )
97
+ from torchmetrics.functional.classification.precision_recall_curve import ( # noqa: F401
98
+ binary_precision_recall_curve,
99
+ multiclass_precision_recall_curve,
100
+ multilabel_precision_recall_curve,
101
+ precision_recall_curve,
102
+ )
103
+ from torchmetrics.functional.classification.ranking import ( # noqa: F401
104
+ multilabel_coverage_error,
105
+ multilabel_ranking_average_precision,
106
+ multilabel_ranking_loss,
107
+ )
108
+ from torchmetrics.functional.classification.recall_at_fixed_precision import ( # noqa: F401
109
+ binary_recall_at_fixed_precision,
110
+ multiclass_recall_at_fixed_precision,
111
+ multilabel_recall_at_fixed_precision,
112
+ )
113
+ from torchmetrics.functional.classification.roc import binary_roc, multiclass_roc, multilabel_roc, roc # noqa: F401
114
+ from torchmetrics.functional.classification.specificity import ( # noqa: F401
115
+ binary_specificity,
116
+ multiclass_specificity,
117
+ multilabel_specificity,
118
+ specificity,
119
+ )
120
+ from torchmetrics.functional.classification.stat_scores import ( # noqa: F401
121
+ binary_stat_scores,
122
+ multiclass_stat_scores,
123
+ multilabel_stat_scores,
124
+ stat_scores,
125
+ )
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__pycache__/auroc.cpython-310.pyc ADDED
Binary file (19.7 kB). View file
 
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__pycache__/average_precision.cpython-310.pyc ADDED
Binary file (19.4 kB). View file
 
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__pycache__/calibration_error.cpython-310.pyc ADDED
Binary file (13.7 kB). View file
 
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__pycache__/jaccard.cpython-310.pyc ADDED
Binary file (13.4 kB). View file
 
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__pycache__/precision_recall_curve.cpython-310.pyc ADDED
Binary file (32.2 kB). View file
 
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__pycache__/ranking.cpython-310.pyc ADDED
Binary file (9.42 kB). View file
 
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/__pycache__/roc.cpython-310.pyc ADDED
Binary file (22.8 kB). View file
 
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/accuracy.py ADDED
@@ -0,0 +1,428 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Optional
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.functional.classification.stat_scores import (
21
+ _binary_stat_scores_arg_validation,
22
+ _binary_stat_scores_format,
23
+ _binary_stat_scores_tensor_validation,
24
+ _binary_stat_scores_update,
25
+ _multiclass_stat_scores_arg_validation,
26
+ _multiclass_stat_scores_format,
27
+ _multiclass_stat_scores_tensor_validation,
28
+ _multiclass_stat_scores_update,
29
+ _multilabel_stat_scores_arg_validation,
30
+ _multilabel_stat_scores_format,
31
+ _multilabel_stat_scores_tensor_validation,
32
+ _multilabel_stat_scores_update,
33
+ )
34
+ from torchmetrics.utilities.compute import _safe_divide
35
+
36
+
37
def _accuracy_reduce(
    tp: Tensor,
    fp: Tensor,
    tn: Tensor,
    fn: Tensor,
    average: Optional[Literal["binary", "micro", "macro", "weighted", "none"]],
    multidim_average: Literal["global", "samplewise"] = "global",
    multilabel: bool = False,
) -> Tensor:
    """Reduce classification statistics into accuracy score.

    Args:
        tp: number of true positives
        fp: number of false positives
        tn: number of true negatives
        fn: number of false negatives
        average: reduction over the label dimension.

            - ``"binary"``: fraction of all predictions that are correct
            - ``"micro"``: sum statistics over all labels before dividing
            - ``"macro"``: per-label score, averaged with equal weight
            - ``"weighted"``: per-label score, averaged weighted by support (tp + fn)
            - ``"none"`` or ``None``: per-label score with no reduction

        multidim_average: whether the input statistics were aggregated globally
            (``"global"``, sum over dim 0 for micro) or per sample
            (``"samplewise"``, sum over dim 1)
        multilabel: bool indicating if reduction is for multilabel tasks

    Returns:
        Accuracy score
    """
    if average == "binary":
        return _safe_divide(tp + tn, tp + tn + fp + fn)
    elif average == "micro":
        tp = tp.sum(dim=0 if multidim_average == "global" else 1)
        fn = fn.sum(dim=0 if multidim_average == "global" else 1)
        if multilabel:
            fp = fp.sum(dim=0 if multidim_average == "global" else 1)
            tn = tn.sum(dim=0 if multidim_average == "global" else 1)
            return _safe_divide(tp + tn, tp + tn + fp + fn)
        return _safe_divide(tp, tp + fn)
    else:
        if multilabel:
            # multilabel per-label accuracy counts correct negatives as well
            score = _safe_divide(tp + tn, tp + tn + fp + fn)
        else:
            # multiclass per-class recall-of-correct: tp / (tp + fn)
            score = _safe_divide(tp, tp + fn)
        if average is None or average == "none":
            return score
        if average == "weighted":
            weights = tp + fn
        else:
            weights = torch.ones_like(score)
        return _safe_divide(weights * score, weights.sum(-1, keepdim=True)).sum(-1)
+
85
+
86
def binary_accuracy(
    preds: Tensor,
    target: Tensor,
    threshold: float = 0.5,
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes `Accuracy`_ for binary tasks:

    .. math::
        \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)

    where :math:`y` is a tensor of target values and :math:`\hat{y}` a tensor of
    predictions.

    ``preds`` may be an int or float tensor of shape ``(N, ...)``; float values outside
    the :math:`[0, 1]` range are treated as logits and passed through a sigmoid, after
    which ``threshold`` binarizes them. ``target`` is an int tensor of shape ``(N, ...)``.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        threshold: Threshold for transforming probability to binary {0,1} predictions
        multidim_average: how the extra dimensions ``...`` are handled —
            ``"global"`` flattens them into the batch dimension, while
            ``"samplewise"`` computes one statistic per sample along the ``N`` axis
        ignore_index: target value that is ignored in the metric computation
        validate_args: bool indicating if input arguments and tensors should be
            validated for correctness. Set to ``False`` for faster computations.

    Returns:
        A scalar tensor when ``multidim_average="global"``; a ``(N,)`` tensor of
        per-sample scores when ``multidim_average="samplewise"``.

    Example:
        >>> from torchmetrics.functional.classification import binary_accuracy
        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
        >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
        >>> binary_accuracy(preds, target)
        tensor(0.6667)

    Example (multidim tensors):
        >>> from torchmetrics.functional.classification import binary_accuracy
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> binary_accuracy(preds, target, multidim_average='samplewise')
        tensor([0.3333, 0.1667])
    """
    # Optional validation of arguments and tensor shapes/values.
    if validate_args:
        _binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index)
        _binary_stat_scores_tensor_validation(preds, target, multidim_average, ignore_index)
    # Normalize inputs (sigmoid + threshold), accumulate stats, then reduce.
    preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index)
    stats = _binary_stat_scores_update(preds, target, multidim_average)
    return _accuracy_reduce(*stats, average="binary", multidim_average=multidim_average)
+
162
+
163
def multiclass_accuracy(
    preds: Tensor,
    target: Tensor,
    num_classes: int,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
    top_k: int = 1,
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes `Accuracy`_ for multiclass tasks:

    .. math::
        \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)

    where :math:`y` is a tensor of target values and :math:`\hat{y}` a tensor of
    predictions.

    ``preds`` may be an int tensor of shape ``(N, ...)`` or a float tensor of shape
    ``(N, C, ...)``; float inputs are converted to class predictions via
    ``torch.argmax`` along the ``C`` dimension. ``target`` is an int tensor ``(N, ...)``.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_classes: Integer specifing the number of classes
        average: reduction over the class dimension — ``"micro"`` sums statistics
            over all classes, ``"macro"`` averages per-class scores equally,
            ``"weighted"`` averages per-class scores weighted by support, and
            ``"none"``/``None`` applies no reduction
        top_k: number of highest probability or logit score predictions considered to
            find the correct label. Only works when ``preds`` contain probabilities/logits.
        multidim_average: how the extra dimensions ``...`` are handled —
            ``"global"`` flattens them into the batch dimension, while
            ``"samplewise"`` computes one statistic per sample along the ``N`` axis
        ignore_index: target value that is ignored in the metric computation
        validate_args: bool indicating if input arguments and tensors should be
            validated for correctness. Set to ``False`` for faster computations.

    Returns:
        Shape depends on ``average`` and ``multidim_average``: a scalar for
        ``global`` + ``'micro'/'macro'/'weighted'``; ``(C,)`` for ``global`` +
        ``None/'none'``; ``(N,)`` for ``samplewise`` + ``'micro'/'macro'/'weighted'``;
        ``(N, C)`` for ``samplewise`` + ``None/'none'``.

    Example:
        >>> from torchmetrics.functional.classification import multiclass_accuracy
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([2, 1, 0, 1])
        >>> multiclass_accuracy(preds, target, num_classes=3)
        tensor(0.8333)
        >>> multiclass_accuracy(preds, target, num_classes=3, average=None)
        tensor([0.5000, 1.0000, 1.0000])

    Example (multidim tensors):
        >>> from torchmetrics.functional.classification import multiclass_accuracy
        >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
        >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
        >>> multiclass_accuracy(preds, target, num_classes=3, multidim_average='samplewise')
        tensor([0.5000, 0.2778])
        >>> multiclass_accuracy(preds, target, num_classes=3, multidim_average='samplewise', average=None)
        tensor([[1.0000, 0.0000, 0.5000],
                [0.0000, 0.3333, 0.5000]])
    """
    # Optional validation of arguments and tensor shapes/values.
    if validate_args:
        _multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index)
        _multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index)
    # Normalize inputs (argmax/top-k handling), accumulate stats, then reduce.
    preds, target = _multiclass_stat_scores_format(preds, target, top_k)
    stats = _multiclass_stat_scores_update(
        preds, target, num_classes, top_k, average, multidim_average, ignore_index
    )
    return _accuracy_reduce(*stats, average=average, multidim_average=multidim_average)
+
270
+
271
def multilabel_accuracy(
    preds: Tensor,
    target: Tensor,
    num_labels: int,
    threshold: float = 0.5,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes `Accuracy`_ for multilabel tasks:

    .. math::
        \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)

    where :math:`y` is a tensor of target values and :math:`\hat{y}` is a tensor of predictions.

    Accepts the following input tensors:

    - ``preds`` (int or float tensor): ``(N, C, ...)``. Floating point predictions with values outside the
      [0,1] range are treated as logits and a sigmoid is applied per element; the result is then binarized
      with ``threshold``.
    - ``target`` (int tensor): ``(N, C, ...)``

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_labels: Integer specifing the number of labels
        threshold: Threshold for transforming probability to binary (0,1) predictions
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction

        multidim_average:
            Defines how additionally dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flatted along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        The returned shape depends on the ``average`` and ``multidim_average`` arguments:

        - If ``multidim_average`` is set to ``global``:

          - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
          - If ``average=None/'none'``, the shape will be ``(C,)``

        - If ``multidim_average`` is set to ``samplewise``:

          - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
          - If ``average=None/'none'``, the shape will be ``(N, C)``

    Example (preds is int tensor):
        >>> from torchmetrics.functional.classification import multilabel_accuracy
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
        >>> multilabel_accuracy(preds, target, num_labels=3)
        tensor(0.6667)
        >>> multilabel_accuracy(preds, target, num_labels=3, average=None)
        tensor([1.0000, 0.5000, 0.5000])

    Example (preds is float tensor):
        >>> from torchmetrics.functional.classification import multilabel_accuracy
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
        >>> multilabel_accuracy(preds, target, num_labels=3)
        tensor(0.6667)
        >>> multilabel_accuracy(preds, target, num_labels=3, average=None)
        tensor([1.0000, 0.5000, 0.5000])

    Example (multidim tensors):
        >>> from torchmetrics.functional.classification import multilabel_accuracy
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> multilabel_accuracy(preds, target, num_labels=3, multidim_average='samplewise')
        tensor([0.3333, 0.1667])
        >>> multilabel_accuracy(preds, target, num_labels=3, multidim_average='samplewise', average=None)
        tensor([[0.5000, 0.5000, 0.0000],
                [0.0000, 0.0000, 0.5000]])
    """
    # Validation runs on the raw inputs, before any formatting, and is skippable for speed.
    if validate_args:
        _multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index)
        _multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index)
    # Normalize predictions (sigmoid on logits + thresholding) and apply ignore_index masking.
    preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index)
    # stats is the (tp, fp, tn, fn) tuple that the reduction consumes.
    stats = _multilabel_stat_scores_update(preds, target, multidim_average)
    return _accuracy_reduce(*stats, average=average, multidim_average=multidim_average, multilabel=True)
373
+
374
+
375
def accuracy(
    preds: Tensor,
    target: Tensor,
    task: Literal["binary", "multiclass", "multilabel"],
    threshold: float = 0.5,
    num_classes: Optional[int] = None,
    num_labels: Optional[int] = None,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
    multidim_average: Optional[Literal["global", "samplewise"]] = "global",
    top_k: Optional[int] = 1,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes `Accuracy`_

    .. math::
        \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)

    where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.

    This is a thin dispatcher: the ``task`` argument (``'binary'``, ``'multiclass'`` or ``'multilabel'``)
    selects which task-specific implementation is invoked. See :func:`binary_accuracy`,
    :func:`multiclass_accuracy` and :func:`multilabel_accuracy` for the details of each argument
    and further examples.

    Legacy Example:
        >>> import torch
        >>> target = torch.tensor([0, 1, 2, 3])
        >>> preds = torch.tensor([0, 2, 1, 3])
        >>> accuracy(preds, target, task="multiclass", num_classes=4)
        tensor(0.5000)

        >>> target = torch.tensor([0, 1, 2])
        >>> preds = torch.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]])
        >>> accuracy(preds, target, task="multiclass", num_classes=3, top_k=2)
        tensor(0.6667)
    """
    # The signature allows ``multidim_average=None`` for API symmetry with other wrappers,
    # but the task-specific functions all need a concrete value.
    assert multidim_average is not None
    if task == "binary":
        return binary_accuracy(preds, target, threshold, multidim_average, ignore_index, validate_args)
    elif task == "multiclass":
        # The multiclass path additionally requires ``num_classes`` and ``top_k``.
        assert isinstance(num_classes, int)
        assert isinstance(top_k, int)
        return multiclass_accuracy(
            preds, target, num_classes, average, top_k, multidim_average, ignore_index, validate_args
        )
    elif task == "multilabel":
        # The multilabel path additionally requires ``num_labels``.
        assert isinstance(num_labels, int)
        return multilabel_accuracy(
            preds, target, num_labels, threshold, average, multidim_average, ignore_index, validate_args
        )
    raise ValueError(
        f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
    )
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/auroc.py ADDED
@@ -0,0 +1,463 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import List, Optional, Tuple, Union
15
+
16
+ import torch
17
+ from torch import Tensor, tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.functional.classification.precision_recall_curve import (
21
+ _binary_precision_recall_curve_arg_validation,
22
+ _binary_precision_recall_curve_format,
23
+ _binary_precision_recall_curve_tensor_validation,
24
+ _binary_precision_recall_curve_update,
25
+ _multiclass_precision_recall_curve_arg_validation,
26
+ _multiclass_precision_recall_curve_format,
27
+ _multiclass_precision_recall_curve_tensor_validation,
28
+ _multiclass_precision_recall_curve_update,
29
+ _multilabel_precision_recall_curve_arg_validation,
30
+ _multilabel_precision_recall_curve_format,
31
+ _multilabel_precision_recall_curve_tensor_validation,
32
+ _multilabel_precision_recall_curve_update,
33
+ )
34
+ from torchmetrics.functional.classification.roc import (
35
+ _binary_roc_compute,
36
+ _multiclass_roc_compute,
37
+ _multilabel_roc_compute,
38
+ )
39
+ from torchmetrics.utilities.compute import _auc_compute_without_check, _safe_divide
40
+ from torchmetrics.utilities.data import _bincount
41
+ from torchmetrics.utilities.prints import rank_zero_warn
42
+
43
+
44
def _reduce_auroc(
    fpr: Union[Tensor, List[Tensor]],
    tpr: Union[Tensor, List[Tensor]],
    average: Optional[Literal["macro", "weighted", "none"]] = "macro",
    weights: Optional[Tensor] = None,
) -> Tensor:
    """Reduce per-class/per-label AUROC scores into a single value.

    ``fpr``/``tpr`` are either stacked 2d tensors (binned path, one row per class) or lists of 1d
    tensors (non-binned path). ``weights`` supplies the class support used by the ``"weighted"``
    reduction. Classes whose score is ``nan`` are dropped from the reduction with a warning.
    """
    if isinstance(fpr, Tensor):
        # Binned state: all curves share the same length, so AUC is computed batched along axis 1.
        scores = _auc_compute_without_check(fpr, tpr, 1.0, axis=1)
    else:
        # Non-binned state: curves have varying lengths, compute one AUC per curve and stack.
        scores = torch.stack([_auc_compute_without_check(x, y, 1.0) for x, y in zip(fpr, tpr)])
    if average in (None, "none"):
        return scores
    nan_mask = torch.isnan(scores)
    if nan_mask.any():
        rank_zero_warn(
            f"Average precision score for one or more classes was `nan`. Ignoring these classes in {average}-average",
            UserWarning,
        )
    valid = ~nan_mask
    if average == "macro":
        return scores[valid].mean()
    if average == "weighted" and weights is not None:
        # Renormalize the supports of the surviving classes so they sum to one.
        norm_weights = _safe_divide(weights[valid], weights[valid].sum())
        return (scores[valid] * norm_weights).sum()
    raise ValueError("Received an incompatible combinations of inputs to make reduction.")
71
+
72
+
73
def _binary_auroc_arg_validation(
    max_fpr: Optional[float] = None,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
) -> None:
    """Validate the arguments of :func:`binary_auroc`.

    Delegates the shared ``thresholds``/``ignore_index`` checks and additionally verifies that
    ``max_fpr``, when given, is a float in the half-open range (0, 1].

    Raises:
        ValueError: If ``max_fpr`` is not ``None`` and is not a float in (0, 1].
    """
    _binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
    # Bug fix: the original condition `not isinstance(max_fpr, float) and 0 < max_fpr <= 1`
    # could never fire for an out-of-range float (the isinstance test short-circuited it) and
    # raised a TypeError instead of the intended ValueError for non-numeric inputs. Reject
    # anything that is not a float inside (0, 1].
    if max_fpr is not None and not (isinstance(max_fpr, float) and 0 < max_fpr <= 1):
        raise ValueError(f"Arguments `max_fpr` should be a float in range (0, 1], but got: {max_fpr}")
81
+
82
+
83
def _binary_auroc_compute(
    state: Union[Tensor, Tuple[Tensor, Tensor]],
    thresholds: Optional[Tensor],
    max_fpr: Optional[float] = None,
    pos_label: int = 1,
) -> Tensor:
    """Compute the binary AUROC score from accumulated ROC ``state``.

    Args:
        state: forwarded unchanged to ``_binary_roc_compute``; either a ``(preds, target)`` pair
            (non-binned path) or the binned state tensor -- see the roc helpers for details.
        thresholds: optional tensor of bin thresholds; ``None`` selects the non-binned path.
        max_fpr: if set, return the McClish-standardized partial AUC over ``[0, max_fpr]``
            instead of the full area.
        pos_label: the label treated as the positive class when building the ROC curve.

    Returns:
        A scalar tensor with the (partial) AUROC score.
    """
    fpr, tpr, _ = _binary_roc_compute(state, thresholds, pos_label)
    if max_fpr is None or max_fpr == 1:
        # max_fpr == 1 covers the full FPR range, which is just the ordinary AUROC.
        return _auc_compute_without_check(fpr, tpr, 1.0)

    _device = fpr.device if isinstance(fpr, Tensor) else fpr[0].device
    max_area: Tensor = tensor(max_fpr, device=_device)
    # Add a single point at max_fpr and interpolate its tpr value
    stop = torch.bucketize(max_area, fpr, out_int32=True, right=True)
    weight = (max_area - fpr[stop - 1]) / (fpr[stop] - fpr[stop - 1])
    interp_tpr: Tensor = torch.lerp(tpr[stop - 1], tpr[stop], weight)
    # Truncate both curves at the interpolated point so the AUC only covers [0, max_fpr].
    tpr = torch.cat([tpr[:stop], interp_tpr.view(1)])
    fpr = torch.cat([fpr[:stop], max_area.view(1)])

    # Compute partial AUC
    partial_auc = _auc_compute_without_check(fpr, tpr, 1.0)

    # McClish correction: standardize result to be 0.5 if non-discriminant and 1 if maximal
    min_area: Tensor = 0.5 * max_area**2
    return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))
108
+
109
+
110
def binary_auroc(
    preds: Tensor,
    target: Tensor,
    max_fpr: Optional[float] = None,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for binary tasks. The AUROC
    score summarizes the ROC curve into an single number that describes the performance of a model for multiple
    thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
    corresponds to random guessing.

    Accepts the following input tensors:

    - ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
      observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
      sigmoid per element.
    - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
      only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class.

    Additional dimension ``...`` will be flattened into the batch dimension.

    The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
    that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
    non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
    argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
    size :math:`\mathcal{O}(n_{thresholds})` (constant memory).

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        max_fpr: If not ``None``, calculates standardized partial AUC over the range ``[0, max_fpr]``.
        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
              all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
              0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
              bins for the calculation.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        A single scalar with the auroc score

    Example:
        >>> from torchmetrics.functional.classification import binary_auroc
        >>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
        >>> target = torch.tensor([0, 1, 1, 0])
        >>> binary_auroc(preds, target, thresholds=None)
        tensor(0.5000)
        >>> binary_auroc(preds, target, thresholds=5)
        tensor(0.5000)
    """
    # Fixes vs. original: the return annotation was `Tuple[Tensor, Tensor, Tensor]` although the
    # function always returns a single scalar tensor, and `ignore_index` was missing from the docs.
    if validate_args:
        _binary_auroc_arg_validation(max_fpr, thresholds, ignore_index)
        _binary_precision_recall_curve_tensor_validation(preds, target, ignore_index)
    # Format (sigmoid on logits, ignore_index masking) shares the precision-recall-curve pipeline.
    preds, target, thresholds = _binary_precision_recall_curve_format(preds, target, thresholds, ignore_index)
    state = _binary_precision_recall_curve_update(preds, target, thresholds)
    return _binary_auroc_compute(state, thresholds, max_fpr)
175
+
176
+
177
def _multiclass_auroc_arg_validation(
    num_classes: int,
    average: Optional[Literal["macro", "weighted", "none"]] = "macro",
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
) -> None:
    """Validate the arguments of :func:`multiclass_auroc`.

    Runs the shared precision-recall-curve argument checks and additionally verifies that
    ``average`` is one of the supported reductions.
    """
    _multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index)
    allowed_average = ("macro", "weighted", "none", None)
    if average in allowed_average:
        return
    raise ValueError(f"Expected argument `average` to be one of {allowed_average} but got {average}")
187
+
188
+
189
def _multiclass_auroc_compute(
    state: Union[Tensor, Tuple[Tensor, Tensor]],
    num_classes: int,
    average: Optional[Literal["macro", "weighted", "none"]] = "macro",
    thresholds: Optional[Tensor] = None,
) -> Tensor:
    """Compute the multiclass AUROC from accumulated curve ``state``.

    ``state`` is forwarded unchanged to ``_multiclass_roc_compute``; when ``thresholds`` is
    ``None`` it is the ``(preds, target)`` pair (non-binned path), otherwise the binned state
    tensor produced by the precision-recall-curve update helpers.
    """
    fpr, tpr, _ = _multiclass_roc_compute(state, num_classes, thresholds)
    return _reduce_auroc(
        fpr,
        tpr,
        average,
        # Class supports for the "weighted" reduction: per-class label counts in the non-binned
        # case; in the binned case recovered from the state tensor (index 1 presumably selects
        # the positive/"true" plane -- confirm against the update helper).
        weights=_bincount(state[1], minlength=num_classes).float() if thresholds is None else state[0][:, 1, :].sum(-1),
    )
202
+
203
+
204
def multiclass_auroc(
    preds: Tensor,
    target: Tensor,
    num_classes: int,
    average: Optional[Literal["macro", "weighted", "none"]] = "macro",
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for multiclass tasks.

    The AUROC score summarizes the ROC curve into an single number that describes the performance of a model
    for multiple thresholds at the same time. An AUROC score of 1 is a perfect score and 0.5 corresponds to
    random guessing.

    Accepts the following input tensors:

    - ``preds`` (float tensor): ``(N, C, ...)``. Probabilities or logits per observation; values outside the
      [0,1] range are treated as logits and a softmax is applied per sample.
    - ``target`` (int tensor): ``(N, ...)``. Ground truth labels in the [0, n_classes-1] range
      (except if `ignore_index` is specified).

    Additional dimension ``...`` will be flattened into the batch dimension.

    Setting ``thresholds=None`` selects the exact, non-binned computation with
    :math:`\mathcal{O}(n_{samples})` memory; any other value selects the binned approximation with
    :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant) memory.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_classes: Integer specifing the number of classes
        average:
            Defines the reduction that is applied over classes. Should be one of the following:

            - ``macro``: Calculate score for each class and average them
            - ``weighted``: Calculates score for each class and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates score for each class and applies no reduction
        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
              all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
              0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
              bins for the calculation.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will be returned with auroc score per class.
        If `average="macro"|"weighted"` then a single scalar is returned.

    Example:
        >>> from torchmetrics.functional.classification import multiclass_auroc
        >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
        ...                       [0.05, 0.75, 0.05, 0.05, 0.05],
        ...                       [0.05, 0.05, 0.75, 0.05, 0.05],
        ...                       [0.05, 0.05, 0.05, 0.75, 0.05]])
        >>> target = torch.tensor([0, 1, 3, 2])
        >>> multiclass_auroc(preds, target, num_classes=5, average="macro", thresholds=None)
        tensor(0.5333)
        >>> multiclass_auroc(preds, target, num_classes=5, average=None, thresholds=None)
        tensor([1.0000, 1.0000, 0.3333, 0.3333, 0.0000])
        >>> multiclass_auroc(preds, target, num_classes=5, average="macro", thresholds=5)
        tensor(0.5333)
        >>> multiclass_auroc(preds, target, num_classes=5, average=None, thresholds=5)
        tensor([1.0000, 1.0000, 0.3333, 0.3333, 0.0000])
    """
    # Validation runs on the raw inputs and is skippable for speed.
    if validate_args:
        _multiclass_auroc_arg_validation(num_classes, average, thresholds, ignore_index)
        _multiclass_precision_recall_curve_tensor_validation(preds, target, num_classes, ignore_index)
    # Normalization (softmax on logits, ignore_index masking) is shared with the PR-curve pipeline.
    formatted = _multiclass_precision_recall_curve_format(preds, target, num_classes, thresholds, ignore_index)
    preds, target, thresholds = formatted
    state = _multiclass_precision_recall_curve_update(preds, target, num_classes, thresholds)
    return _multiclass_auroc_compute(state, num_classes, average, thresholds)
286
+
287
+
288
def _multilabel_auroc_arg_validation(
    num_labels: int,
    average: Optional[Literal["micro", "macro", "weighted", "none"]],
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
) -> None:
    """Validate the arguments of :func:`multilabel_auroc`.

    Runs the shared precision-recall-curve argument checks and additionally verifies that
    ``average`` is one of the supported reductions (``micro`` is allowed here, unlike multiclass).
    """
    _multilabel_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index)
    allowed_average = ("micro", "macro", "weighted", "none", None)
    if average in allowed_average:
        return
    raise ValueError(f"Expected argument `average` to be one of {allowed_average} but got {average}")
298
+
299
+
300
def _multilabel_auroc_compute(
    state: Union[Tensor, Tuple[Tensor, Tensor]],
    num_labels: int,
    average: Optional[Literal["micro", "macro", "weighted", "none"]],
    thresholds: Optional[Tensor],
    ignore_index: Optional[int] = None,
) -> Tensor:
    """Compute the multilabel AUROC from accumulated curve ``state``.

    For ``average="micro"`` the problem is reduced to a single binary one: the binned state is
    summed over the label dimension, or (non-binned) predictions/targets are flattened across all
    labels before the binary computation. All other reductions go through per-label ROC curves
    followed by ``_reduce_auroc``. All paths return a scalar or 1d tensor (the original return
    annotation also listed a tuple, which no branch produces).
    """
    if average == "micro":
        if isinstance(state, Tensor) and thresholds is not None:
            # Binned path: collapse the label dimension so the state looks like a binary one.
            return _binary_auroc_compute(state.sum(1), thresholds, max_fpr=None)
        else:
            # Non-binned path: treat every (sample, label) pair as one binary observation.
            preds = state[0].flatten()
            target = state[1].flatten()
            if ignore_index is not None:
                # Masking must be redone here because flattening mixes labels together.
                idx = target == ignore_index
                preds = preds[~idx]
                target = target[~idx]
            return _binary_auroc_compute((preds, target), thresholds, max_fpr=None)

    else:
        fpr, tpr, _ = _multilabel_roc_compute(state, num_labels, thresholds, ignore_index)
        return _reduce_auroc(
            fpr,
            tpr,
            average,
            # Label supports for the "weighted" reduction: positive counts per label in the
            # non-binned case; otherwise recovered from the binned state (index 1 presumably
            # selects the positive plane -- confirm against the update helper).
            weights=(state[1] == 1).sum(dim=0).float() if thresholds is None else state[0][:, 1, :].sum(-1),
        )
327
+
328
+
329
def multilabel_auroc(
    preds: Tensor,
    target: Tensor,
    num_labels: int,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for multilabel tasks. The AUROC
    score summarizes the ROC curve into an single number that describes the performance of a model for multiple
    thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
    corresponds to random guessing.

    Accepts the following input tensors:

    - ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
      observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
      sigmoid per element.
    - ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore
      only contain {0,1} values (except if `ignore_index` is specified).

    Additional dimension ``...`` will be flattened into the batch dimension.

    The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
    that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
    non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
    argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
    size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_labels: Integer specifing the number of labels
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum score over all labels
            - ``macro``: Calculate score for each label and average them
            - ``weighted``: Calculates score for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates score for each label and applies no reduction
        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
              all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
              0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
              bins for the calculation.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will be returned with auroc score per class.
        If `average="micro|macro"|"weighted"` then a single scalar is returned.

    Example:
        >>> from torchmetrics.functional.classification import multilabel_auroc
        >>> preds = torch.tensor([[0.75, 0.05, 0.35],
        ...                       [0.45, 0.75, 0.05],
        ...                       [0.05, 0.55, 0.75],
        ...                       [0.05, 0.65, 0.05]])
        >>> target = torch.tensor([[1, 0, 1],
        ...                        [0, 0, 0],
        ...                        [0, 1, 1],
        ...                        [1, 1, 1]])
        >>> multilabel_auroc(preds, target, num_labels=3, average="macro", thresholds=None)
        tensor(0.6528)
        >>> multilabel_auroc(preds, target, num_labels=3, average=None, thresholds=None)
        tensor([0.6250, 0.5000, 0.8333])
        >>> multilabel_auroc(preds, target, num_labels=3, average="macro", thresholds=5)
        tensor(0.6528)
        >>> multilabel_auroc(preds, target, num_labels=3, average=None, thresholds=5)
        tensor([0.6250, 0.5000, 0.8333])
    """
    # Fixes vs. original: the return annotation was a Union of tuples (copy-pasted from the
    # roc-curve functions) although this function always returns a single tensor, and
    # `ignore_index` was missing from the docs.
    if validate_args:
        _multilabel_auroc_arg_validation(num_labels, average, thresholds, ignore_index)
        _multilabel_precision_recall_curve_tensor_validation(preds, target, num_labels, ignore_index)
    # Format (sigmoid on logits, ignore_index masking) shares the precision-recall-curve pipeline.
    preds, target, thresholds = _multilabel_precision_recall_curve_format(
        preds, target, num_labels, thresholds, ignore_index
    )
    state = _multilabel_precision_recall_curve_update(preds, target, num_labels, thresholds)
    return _multilabel_auroc_compute(state, num_labels, average, thresholds, ignore_index)
415
+
416
+
417
def auroc(
    preds: Tensor,
    target: Tensor,
    task: Literal["binary", "multiclass", "multilabel"],
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    num_classes: Optional[int] = None,
    num_labels: Optional[int] = None,
    average: Optional[Literal["macro", "weighted", "none"]] = "macro",
    max_fpr: Optional[float] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Union[Tensor, Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
    r"""Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_).

    The AUROC score summarizes the ROC curve into an single number describing a model's performance
    across all thresholds at once; 1 is a perfect score and 0.5 corresponds to random guessing.

    This is a thin dispatcher: the ``task`` argument (``'binary'``, ``'multiclass'`` or
    ``'multilabel'``) selects which task-specific implementation is invoked. See
    :func:`binary_auroc`, :func:`multiclass_auroc` and :func:`multilabel_auroc` for the details of
    each argument and further examples.

    Legacy Example:
        >>> preds = torch.tensor([0.13, 0.26, 0.08, 0.19, 0.34])
        >>> target = torch.tensor([0, 0, 1, 1, 1])
        >>> auroc(preds, target, task='binary')
        tensor(0.5000)

        >>> preds = torch.tensor([[0.90, 0.05, 0.05],
        ...                       [0.05, 0.90, 0.05],
        ...                       [0.05, 0.05, 0.90],
        ...                       [0.85, 0.05, 0.10],
        ...                       [0.10, 0.10, 0.80]])
        >>> target = torch.tensor([0, 1, 1, 2, 2])
        >>> auroc(preds, target, task='multiclass', num_classes=3)
        tensor(0.7778)
    """
    if task == "binary":
        # ``max_fpr`` only applies to the binary variant.
        return binary_auroc(preds, target, max_fpr, thresholds, ignore_index, validate_args)
    elif task == "multiclass":
        # The multiclass path additionally requires ``num_classes``.
        assert isinstance(num_classes, int)
        return multiclass_auroc(preds, target, num_classes, average, thresholds, ignore_index, validate_args)
    elif task == "multilabel":
        # The multilabel path additionally requires ``num_labels``.
        assert isinstance(num_labels, int)
        return multilabel_auroc(preds, target, num_labels, average, thresholds, ignore_index, validate_args)
    raise ValueError(
        f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
    )
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/calibration_error.py ADDED
@@ -0,0 +1,356 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Optional, Tuple, Union
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.functional.classification.confusion_matrix import (
21
+ _binary_confusion_matrix_format,
22
+ _binary_confusion_matrix_tensor_validation,
23
+ _multiclass_confusion_matrix_format,
24
+ _multiclass_confusion_matrix_tensor_validation,
25
+ )
26
+
27
+
28
+ def _binning_bucketize(
29
+ confidences: Tensor, accuracies: Tensor, bin_boundaries: Tensor
30
+ ) -> Tuple[Tensor, Tensor, Tensor]:
31
+ """Compute calibration bins using ``torch.bucketize``. Use for pytorch >= 1.6.
32
+
33
+ Args:
34
+ confidences: The confidence (i.e. predicted prob) of the top1 prediction.
35
+ accuracies: 1.0 if the top-1 prediction was correct, 0.0 otherwise.
36
+ bin_boundaries: Bin boundaries separating the ``linspace`` from 0 to 1.
37
+
38
+ Returns:
39
+ tuple with binned accuracy, binned confidence and binned probabilities
40
+ """
41
+ accuracies = accuracies.to(dtype=confidences.dtype)
42
+ acc_bin = torch.zeros(len(bin_boundaries) - 1, device=confidences.device, dtype=confidences.dtype)
43
+ conf_bin = torch.zeros(len(bin_boundaries) - 1, device=confidences.device, dtype=confidences.dtype)
44
+ count_bin = torch.zeros(len(bin_boundaries) - 1, device=confidences.device, dtype=confidences.dtype)
45
+
46
+ indices = torch.bucketize(confidences, bin_boundaries) - 1
47
+
48
+ count_bin.scatter_add_(dim=0, index=indices, src=torch.ones_like(confidences))
49
+
50
+ conf_bin.scatter_add_(dim=0, index=indices, src=confidences)
51
+ conf_bin = torch.nan_to_num(conf_bin / count_bin)
52
+
53
+ acc_bin.scatter_add_(dim=0, index=indices, src=accuracies)
54
+ acc_bin = torch.nan_to_num(acc_bin / count_bin)
55
+
56
+ prop_bin = count_bin / count_bin.sum()
57
+ return acc_bin, conf_bin, prop_bin
58
+
59
+
60
+ def _ce_compute(
61
+ confidences: Tensor,
62
+ accuracies: Tensor,
63
+ bin_boundaries: Union[Tensor, int],
64
+ norm: str = "l1",
65
+ debias: bool = False,
66
+ ) -> Tensor:
67
+ """Computes the calibration error given the provided bin boundaries and norm.
68
+
69
+ Args:
70
+ confidences: The confidence (i.e. predicted prob) of the top1 prediction.
71
+ accuracies: 1.0 if the top-1 prediction was correct, 0.0 otherwise.
72
+ bin_boundaries: Bin boundaries separating the ``linspace`` from 0 to 1.
73
+ norm: Norm function to use when computing calibration error. Defaults to "l1".
74
+ debias: Apply debiasing to L2 norm computation as in
75
+ `Verified Uncertainty Calibration`_. Defaults to False.
76
+
77
+ Raises:
78
+ ValueError: If an unsupported norm function is provided.
79
+
80
+ Returns:
81
+ Tensor: Calibration error scalar.
82
+ """
83
+ if isinstance(bin_boundaries, int):
84
+ bin_boundaries = torch.linspace(0, 1, bin_boundaries + 1, dtype=torch.float, device=confidences.device)
85
+
86
+ if norm not in {"l1", "l2", "max"}:
87
+ raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max. ")
88
+
89
+ with torch.no_grad():
90
+ acc_bin, conf_bin, prop_bin = _binning_bucketize(confidences, accuracies, bin_boundaries)
91
+
92
+ if norm == "l1":
93
+ ce = torch.sum(torch.abs(acc_bin - conf_bin) * prop_bin)
94
+ elif norm == "max":
95
+ ce = torch.max(torch.abs(acc_bin - conf_bin))
96
+ elif norm == "l2":
97
+ ce = torch.sum(torch.pow(acc_bin - conf_bin, 2) * prop_bin)
98
+ # NOTE: debiasing is disabled in the wrapper functions. This implementation differs from that in sklearn.
99
+ if debias:
100
+ # the order here (acc_bin - 1 ) vs (1 - acc_bin) is flipped from
101
+ # the equation in Verified Uncertainty Prediction (Kumar et al 2019)/
102
+ debias_bins = (acc_bin * (acc_bin - 1) * prop_bin) / (prop_bin * accuracies.size()[0] - 1)
103
+ ce += torch.sum(torch.nan_to_num(debias_bins)) # replace nans with zeros if nothing appeared in a bin
104
+ ce = torch.sqrt(ce) if ce > 0 else torch.tensor(0)
105
+ return ce
106
+
107
+
108
+ def _binary_calibration_error_arg_validation(
109
+ n_bins: int,
110
+ norm: Literal["l1", "l2", "max"] = "l1",
111
+ ignore_index: Optional[int] = None,
112
+ ) -> None:
113
+ if not isinstance(n_bins, int) or n_bins < 1:
114
+ raise ValueError(f"Expected argument `n_bins` to be an integer larger than 0, but got {n_bins}")
115
+ allowed_norm = ("l1", "l2", "max")
116
+ if norm not in allowed_norm:
117
+ raise ValueError(f"Expected argument `norm` to be one of {allowed_norm}, but got {norm}.")
118
+ if ignore_index is not None and not isinstance(ignore_index, int):
119
+ raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
120
+
121
+
122
+ def _binary_calibration_error_tensor_validation(
123
+ preds: Tensor, target: Tensor, ignore_index: Optional[int] = None
124
+ ) -> None:
125
+ _binary_confusion_matrix_tensor_validation(preds, target, ignore_index)
126
+ if not preds.is_floating_point():
127
+ raise ValueError(
128
+ "Expected argument `preds` to be floating tensor with probabilities/logits"
129
+ f" but got tensor with dtype {preds.dtype}"
130
+ )
131
+
132
+
133
+ def _binary_calibration_error_update(preds: Tensor, target: Tensor) -> Tensor:
134
+ confidences, accuracies = preds, target
135
+ return confidences, accuracies
136
+
137
+
138
+ def binary_calibration_error(
139
+ preds: Tensor,
140
+ target: Tensor,
141
+ n_bins: int = 15,
142
+ norm: Literal["l1", "l2", "max"] = "l1",
143
+ ignore_index: Optional[int] = None,
144
+ validate_args: bool = True,
145
+ ) -> Tensor:
146
+ r"""`Top-label Calibration Error`_ for binary tasks. The expected calibration error can be used to quantify how
147
+ well a given model is calibrated e.g. how well the predicted output probabilities of the model matches the
148
+ actual probabilities of the ground truth distribution.
149
+
150
+ Three different norms are implemented, each corresponding to variations on the calibration error metric.
151
+
152
+ .. math::
153
+ \text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)}
154
+
155
+ .. math::
156
+ \text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)}
157
+
158
+ .. math::
159
+ \text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)}
160
+
161
+ Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of
162
+ predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`. Bins are constructed
163
+ in an uniform way in the [0,1] range.
164
+
165
+ Accepts the following input tensors:
166
+
167
+ - ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
168
+ observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
169
+ sigmoid per element.
170
+ - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
171
+ only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class.
172
+
173
+ Additional dimension ``...`` will be flattened into the batch dimension.
174
+
175
+ Args:
176
+ preds: Tensor with predictions
177
+ target: Tensor with true labels
178
+ n_bins: Number of bins to use when computing the metric.
179
+ norm: Norm used to compare empirical and expected probability bins.
180
+ ignore_index:
181
+ Specifies a target value that is ignored and does not contribute to the metric calculation
182
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
183
+ Set to ``False`` for faster computations.
184
+
185
+ Example:
186
+ >>> from torchmetrics.functional.classification import binary_calibration_error
187
+ >>> preds = torch.tensor([0.25, 0.25, 0.55, 0.75, 0.75])
188
+ >>> target = torch.tensor([0, 0, 1, 1, 1])
189
+ >>> binary_calibration_error(preds, target, n_bins=2, norm='l1')
190
+ tensor(0.2900)
191
+ >>> binary_calibration_error(preds, target, n_bins=2, norm='l2')
192
+ tensor(0.2918)
193
+ >>> binary_calibration_error(preds, target, n_bins=2, norm='max')
194
+ tensor(0.3167)
195
+ """
196
+ if validate_args:
197
+ _binary_calibration_error_arg_validation(n_bins, norm, ignore_index)
198
+ _binary_calibration_error_tensor_validation(preds, target, ignore_index)
199
+ preds, target = _binary_confusion_matrix_format(
200
+ preds, target, threshold=0.0, ignore_index=ignore_index, convert_to_labels=False
201
+ )
202
+ confidences, accuracies = _binary_calibration_error_update(preds, target)
203
+ return _ce_compute(confidences, accuracies, n_bins, norm)
204
+
205
+
206
+ def _multiclass_calibration_error_arg_validation(
207
+ num_classes: int,
208
+ n_bins: int,
209
+ norm: Literal["l1", "l2", "max"] = "l1",
210
+ ignore_index: Optional[int] = None,
211
+ ) -> None:
212
+ if not isinstance(num_classes, int) or num_classes < 2:
213
+ raise ValueError(f"Expected argument `num_classes` to be an integer larger than 1, but got {num_classes}")
214
+ if not isinstance(n_bins, int) or n_bins < 1:
215
+ raise ValueError(f"Expected argument `n_bins` to be an integer larger than 0, but got {n_bins}")
216
+ allowed_norm = ("l1", "l2", "max")
217
+ if norm not in allowed_norm:
218
+ raise ValueError(f"Expected argument `norm` to be one of {allowed_norm}, but got {norm}.")
219
+ if ignore_index is not None and not isinstance(ignore_index, int):
220
+ raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
221
+
222
+
223
+ def _multiclass_calibration_error_tensor_validation(
224
+ preds: Tensor, target: Tensor, num_classes: int, ignore_index: Optional[int] = None
225
+ ) -> None:
226
+ _multiclass_confusion_matrix_tensor_validation(preds, target, num_classes, ignore_index)
227
+ if not preds.is_floating_point():
228
+ raise ValueError(
229
+ "Expected argument `preds` to be floating tensor with probabilities/logits"
230
+ f" but got tensor with dtype {preds.dtype}"
231
+ )
232
+
233
+
234
+ def _multiclass_calibration_error_update(
235
+ preds: Tensor,
236
+ target: Tensor,
237
+ ) -> Tensor:
238
+ if not torch.all((0 <= preds) * (preds <= 1)):
239
+ preds = preds.softmax(1)
240
+ confidences, predictions = preds.max(dim=1)
241
+ accuracies = predictions.eq(target)
242
+ return confidences.float(), accuracies.float()
243
+
244
+
245
+ def multiclass_calibration_error(
246
+ preds: Tensor,
247
+ target: Tensor,
248
+ num_classes: int,
249
+ n_bins: int = 15,
250
+ norm: Literal["l1", "l2", "max"] = "l1",
251
+ ignore_index: Optional[int] = None,
252
+ validate_args: bool = True,
253
+ ) -> Tensor:
254
+ r"""`Top-label Calibration Error`_ for multiclass tasks. The expected calibration error can be used to quantify
255
+ how well a given model is calibrated e.g. how well the predicted output probabilities of the model matches the
256
+ actual probabilities of the ground truth distribution.
257
+
258
+ Three different norms are implemented, each corresponding to variations on the calibration error metric.
259
+
260
+ .. math::
261
+ \text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)}
262
+
263
+ .. math::
264
+ \text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)}
265
+
266
+ .. math::
267
+ \text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)}
268
+
269
+ Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of
270
+ predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`. Bins are constructed
271
+ in an uniform way in the [0,1] range.
272
+
273
+ Accepts the following input tensors:
274
+
275
+ - ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
276
+ observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
277
+ softmax per sample.
278
+ - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
279
+ only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).
280
+
281
+ Additional dimension ``...`` will be flattened into the batch dimension.
282
+
283
+ Args:
284
+ preds: Tensor with predictions
285
+ target: Tensor with true labels
286
+ num_classes: Integer specifing the number of classes
287
+ n_bins: Number of bins to use when computing the metric.
288
+ norm: Norm used to compare empirical and expected probability bins.
289
+ ignore_index:
290
+ Specifies a target value that is ignored and does not contribute to the metric calculation
291
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
292
+ Set to ``False`` for faster computations.
293
+
294
+ Example:
295
+ >>> from torchmetrics.functional.classification import multiclass_calibration_error
296
+ >>> preds = torch.tensor([[0.25, 0.20, 0.55],
297
+ ... [0.55, 0.05, 0.40],
298
+ ... [0.10, 0.30, 0.60],
299
+ ... [0.90, 0.05, 0.05]])
300
+ >>> target = torch.tensor([0, 1, 2, 0])
301
+ >>> multiclass_calibration_error(preds, target, num_classes=3, n_bins=3, norm='l1')
302
+ tensor(0.2000)
303
+ >>> multiclass_calibration_error(preds, target, num_classes=3, n_bins=3, norm='l2')
304
+ tensor(0.2082)
305
+ >>> multiclass_calibration_error(preds, target, num_classes=3, n_bins=3, norm='max')
306
+ tensor(0.2333)
307
+ """
308
+ if validate_args:
309
+ _multiclass_calibration_error_arg_validation(num_classes, n_bins, norm, ignore_index)
310
+ _multiclass_calibration_error_tensor_validation(preds, target, num_classes, ignore_index)
311
+ preds, target = _multiclass_confusion_matrix_format(preds, target, ignore_index, convert_to_labels=False)
312
+ confidences, accuracies = _multiclass_calibration_error_update(preds, target)
313
+ return _ce_compute(confidences, accuracies, n_bins, norm)
314
+
315
+
316
+ def calibration_error(
317
+ preds: Tensor,
318
+ target: Tensor,
319
+ task: Literal["binary", "multiclass"] = None,
320
+ n_bins: int = 15,
321
+ norm: Literal["l1", "l2", "max"] = "l1",
322
+ num_classes: Optional[int] = None,
323
+ ignore_index: Optional[int] = None,
324
+ validate_args: bool = True,
325
+ ) -> Tensor:
326
+ r"""`Top-label Calibration Error`_. The expected calibration error can be used to quantify how well a given
327
+ model is calibrated e.g. how well the predicted output probabilities of the model matches the actual
328
+ probabilities of the ground truth distribution.
329
+
330
+ Three different norms are implemented, each corresponding to variations on the calibration error metric.
331
+
332
+ .. math::
333
+ \text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|, \text{L1 norm (Expected Calibration Error)}
334
+
335
+ .. math::
336
+ \text{MCE} = \max_{i} (p_i - c_i), \text{Infinity norm (Maximum Calibration Error)}
337
+
338
+ .. math::
339
+ \text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}, \text{L2 norm (Root Mean Square Calibration Error)}
340
+
341
+ Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`, :math:`c_i` is the average confidence of
342
+ predictions in bin :math:`i`, and :math:`b_i` is the fraction of data points in bin :math:`i`. Bins are constructed
343
+ in an uniform way in the [0,1] range.
344
+
345
+ This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
346
+ ``task`` argument to either ``'binary'`` or ``'multiclass'``. See the documentation of
347
+ :func:`binary_calibration_error` and :func:`multiclass_calibration_error` for the specific details of
348
+ each argument influence and examples.
349
+ """
350
+ assert norm is not None
351
+ if task == "binary":
352
+ return binary_calibration_error(preds, target, n_bins, norm, ignore_index, validate_args)
353
+ if task == "multiclass":
354
+ assert isinstance(num_classes, int)
355
+ return multiclass_calibration_error(preds, target, num_classes, n_bins, norm, ignore_index, validate_args)
356
+ raise ValueError(f"Expected argument `task` to either be `'binary'` or `'multiclass'` but got {task}")
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/cohen_kappa.py ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Optional
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.functional.classification.confusion_matrix import (
21
+ _binary_confusion_matrix_arg_validation,
22
+ _binary_confusion_matrix_format,
23
+ _binary_confusion_matrix_tensor_validation,
24
+ _binary_confusion_matrix_update,
25
+ _multiclass_confusion_matrix_arg_validation,
26
+ _multiclass_confusion_matrix_format,
27
+ _multiclass_confusion_matrix_tensor_validation,
28
+ _multiclass_confusion_matrix_update,
29
+ )
30
+
31
+
32
+ def _cohen_kappa_reduce(confmat: Tensor, weights: Optional[Literal["linear", "quadratic", "none"]] = None) -> Tensor:
33
+ """Reduce an un-normalized confusion matrix of shape (n_classes, n_classes) into the cohen kappa score."""
34
+ confmat = confmat.float() if not confmat.is_floating_point() else confmat
35
+ n_classes = confmat.shape[0]
36
+ sum0 = confmat.sum(dim=0, keepdim=True)
37
+ sum1 = confmat.sum(dim=1, keepdim=True)
38
+ expected = sum1 @ sum0 / sum0.sum() # outer product
39
+
40
+ if weights is None or weights == "none":
41
+ w_mat = torch.ones_like(confmat).flatten()
42
+ w_mat[:: n_classes + 1] = 0
43
+ w_mat = w_mat.reshape(n_classes, n_classes)
44
+ elif weights in ("linear", "quadratic"):
45
+ w_mat = torch.zeros_like(confmat)
46
+ w_mat += torch.arange(n_classes, dtype=w_mat.dtype, device=w_mat.device)
47
+ if weights == "linear":
48
+ w_mat = torch.abs(w_mat - w_mat.T)
49
+ else:
50
+ w_mat = torch.pow(w_mat - w_mat.T, 2.0)
51
+ else:
52
+ raise ValueError(
53
+ f"Received {weights} for argument ``weights`` but should be either" " None, 'linear' or 'quadratic'"
54
+ )
55
+ k = torch.sum(w_mat * confmat) / torch.sum(w_mat * expected)
56
+ return 1 - k
57
+
58
+
59
+ def _binary_cohen_kappa_arg_validation(
60
+ threshold: float = 0.5,
61
+ ignore_index: Optional[int] = None,
62
+ weights: Optional[Literal["linear", "quadratic", "none"]] = None,
63
+ ) -> None:
64
+ """Validate non tensor input.
65
+
66
+ - ``threshold`` has to be a float in the [0,1] range
67
+ - ``ignore_index`` has to be None or int
68
+ - ``weights`` has to be "linear" | "quadratic" | "none" | None
69
+ """
70
+ _binary_confusion_matrix_arg_validation(threshold, ignore_index, normalize=None)
71
+ allowed_weights = ("linear", "quadratic", "none", None)
72
+ if weights not in allowed_weights:
73
+ raise ValueError(f"Expected argument `weight` to be one of {allowed_weights}, but got {weights}.")
74
+
75
+
76
+ def binary_cohen_kappa(
77
+ preds: Tensor,
78
+ target: Tensor,
79
+ threshold: float = 0.5,
80
+ weights: Optional[Literal["linear", "quadratic", "none"]] = None,
81
+ ignore_index: Optional[int] = None,
82
+ validate_args: bool = True,
83
+ ) -> Tensor:
84
+ r"""Calculates `Cohen's kappa score`_ that measures inter-annotator agreement for binary tasks. It is defined
85
+ as.
86
+
87
+ .. math::
88
+ \kappa = (p_o - p_e) / (1 - p_e)
89
+
90
+ where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
91
+ the expected agreement when both annotators assign labels randomly. Note that
92
+ :math:`p_e` is estimated using a per-annotator empirical prior over the
93
+ class labels.
94
+
95
+ Accepts the following input tensors:
96
+
97
+ - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
98
+ [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally,
99
+ we convert to int tensor with thresholding using the value in ``threshold``.
100
+ - ``target`` (int tensor): ``(N, ...)``
101
+
102
+ Additional dimension ``...`` will be flattened into the batch dimension.
103
+
104
+ Args:
105
+ preds: Tensor with predictions
106
+ target: Tensor with true labels
107
+ threshold: Threshold for transforming probability to binary (0,1) predictions
108
+ weights: Weighting type to calculate the score. Choose from:
109
+
110
+ - ``None`` or ``'none'``: no weighting
111
+ - ``'linear'``: linear weighting
112
+ - ``'quadratic'``: quadratic weighting
113
+ ignore_index:
114
+ Specifies a target value that is ignored and does not contribute to the metric calculation
115
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
116
+ Set to ``False`` for faster computations.
117
+ kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
118
+
119
+ Example (preds is int tensor):
120
+ >>> from torchmetrics.functional.classification import binary_cohen_kappa
121
+ >>> target = torch.tensor([1, 1, 0, 0])
122
+ >>> preds = torch.tensor([0, 1, 0, 0])
123
+ >>> binary_cohen_kappa(preds, target)
124
+ tensor(0.5000)
125
+
126
+ Example (preds is float tensor):
127
+ >>> from torchmetrics.functional.classification import binary_cohen_kappa
128
+ >>> target = torch.tensor([1, 1, 0, 0])
129
+ >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01])
130
+ >>> binary_cohen_kappa(preds, target)
131
+ tensor(0.5000)
132
+ """
133
+ if validate_args:
134
+ _binary_cohen_kappa_arg_validation(threshold, ignore_index, weights)
135
+ _binary_confusion_matrix_tensor_validation(preds, target, ignore_index)
136
+ preds, target = _binary_confusion_matrix_format(preds, target, threshold, ignore_index)
137
+ confmat = _binary_confusion_matrix_update(preds, target)
138
+ return _cohen_kappa_reduce(confmat, weights)
139
+
140
+
141
+ def _multiclass_cohen_kappa_arg_validation(
142
+ num_classes: int,
143
+ ignore_index: Optional[int] = None,
144
+ weights: Optional[Literal["linear", "quadratic", "none"]] = None,
145
+ ) -> None:
146
+ """Validate non tensor input.
147
+
148
+ - ``num_classes`` has to be a int larger than 1
149
+ - ``ignore_index`` has to be None or int
150
+ - ``weights`` has to be "linear" | "quadratic" | "none" | None
151
+ """
152
+ _multiclass_confusion_matrix_arg_validation(num_classes, ignore_index, normalize=None)
153
+ allowed_weights = ("linear", "quadratic", "none", None)
154
+ if weights not in allowed_weights:
155
+ raise ValueError(f"Expected argument `weight` to be one of {allowed_weights}, but got {weights}.")
156
+
157
+
158
+ def multiclass_cohen_kappa(
159
+ preds: Tensor,
160
+ target: Tensor,
161
+ num_classes: int,
162
+ weights: Optional[Literal["linear", "quadratic", "none"]] = None,
163
+ ignore_index: Optional[int] = None,
164
+ validate_args: bool = True,
165
+ ) -> Tensor:
166
+ r"""Calculates `Cohen's kappa score`_ that measures inter-annotator agreement for multiclass tasks. It is
167
+ defined as.
168
+
169
+ .. math::
170
+ \kappa = (p_o - p_e) / (1 - p_e)
171
+
172
+ where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
173
+ the expected agreement when both annotators assign labels randomly. Note that
174
+ :math:`p_e` is estimated using a per-annotator empirical prior over the
175
+ class labels.
176
+
177
+ Accepts the following input tensors:
178
+
179
+ - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point
180
+ we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
181
+ an int tensor.
182
+ - ``target`` (int tensor): ``(N, ...)``
183
+
184
+ Additional dimension ``...`` will be flattened into the batch dimension.
185
+
186
+ Args:
187
+ preds: Tensor with predictions
188
+ target: Tensor with true labels
189
+ num_classes: Integer specifing the number of classes
190
+ weights: Weighting type to calculate the score. Choose from:
191
+
192
+ - ``None`` or ``'none'``: no weighting
193
+ - ``'linear'``: linear weighting
194
+ - ``'quadratic'``: quadratic weighting
195
+
196
+
197
+ ignore_index:
198
+ Specifies a target value that is ignored and does not contribute to the metric calculation
199
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
200
+ Set to ``False`` for faster computations.
201
+ kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
202
+
203
+ Example (pred is integer tensor):
204
+ >>> from torchmetrics.functional.classification import multiclass_cohen_kappa
205
+ >>> target = torch.tensor([2, 1, 0, 0])
206
+ >>> preds = torch.tensor([2, 1, 0, 1])
207
+ >>> multiclass_cohen_kappa(preds, target, num_classes=3)
208
+ tensor(0.6364)
209
+
210
+ Example (pred is float tensor):
211
+ >>> from torchmetrics.functional.classification import multiclass_cohen_kappa
212
+ >>> target = torch.tensor([2, 1, 0, 0])
213
+ >>> preds = torch.tensor([
214
+ ... [0.16, 0.26, 0.58],
215
+ ... [0.22, 0.61, 0.17],
216
+ ... [0.71, 0.09, 0.20],
217
+ ... [0.05, 0.82, 0.13],
218
+ ... ])
219
+ >>> multiclass_cohen_kappa(preds, target, num_classes=3)
220
+ tensor(0.6364)
221
+ """
222
+ if validate_args:
223
+ _multiclass_cohen_kappa_arg_validation(num_classes, ignore_index, weights)
224
+ _multiclass_confusion_matrix_tensor_validation(preds, target, num_classes, ignore_index)
225
+ preds, target = _multiclass_confusion_matrix_format(preds, target, ignore_index)
226
+ confmat = _multiclass_confusion_matrix_update(preds, target, num_classes)
227
+ return _cohen_kappa_reduce(confmat, weights)
228
+
229
+
230
+ def cohen_kappa(
231
+ preds: Tensor,
232
+ target: Tensor,
233
+ task: Literal["binary", "multiclass"],
234
+ threshold: float = 0.5,
235
+ num_classes: Optional[int] = None,
236
+ weights: Optional[Literal["linear", "quadratic", "none"]] = None,
237
+ ignore_index: Optional[int] = None,
238
+ validate_args: bool = True,
239
+ ) -> Tensor:
240
+ r"""Calculates `Cohen's kappa score`_ that measures inter-annotator agreement. It is defined as.
241
+
242
+ .. math::
243
+ \kappa = (p_o - p_e) / (1 - p_e)
244
+
245
+ where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
246
+ the expected agreement when both annotators assign labels randomly. Note that
247
+ :math:`p_e` is estimated using a per-annotator empirical prior over the
248
+ class labels.
249
+
250
+ This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
251
+ ``task`` argument to either ``'binary'`` or ``'multiclass'``. See the documentation of
252
+ :func:`binary_cohen_kappa` and :func:`multiclass_cohen_kappa` for the specific details of
253
+ each argument influence and examples.
254
+
255
+ Legacy Example:
256
+ >>> target = torch.tensor([1, 1, 0, 0])
257
+ >>> preds = torch.tensor([0, 1, 0, 0])
258
+ >>> cohen_kappa(preds, target, task="multiclass", num_classes=2)
259
+ tensor(0.5000)
260
+ """
261
+ if task == "binary":
262
+ return binary_cohen_kappa(preds, target, threshold, weights, ignore_index, validate_args)
263
+ if task == "multiclass":
264
+ assert isinstance(num_classes, int)
265
+ return multiclass_cohen_kappa(preds, target, num_classes, weights, ignore_index, validate_args)
266
+ raise ValueError(f"Expected argument `task` to either be `'binary'` or `'multiclass'` but got {task}")
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/confusion_matrix.py ADDED
@@ -0,0 +1,647 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Optional, Tuple
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.utilities.checks import _check_same_shape
21
+ from torchmetrics.utilities.data import _bincount
22
+ from torchmetrics.utilities.prints import rank_zero_warn
23
+
24
+
25
+ def _confusion_matrix_reduce(
26
+ confmat: Tensor, normalize: Optional[Literal["true", "pred", "all", "none"]] = None
27
+ ) -> Tensor:
28
+ """Reduce an un-normalized confusion matrix
29
+ Args:
30
+ confmat: un-normalized confusion matrix
31
+ normalize: normalization method.
32
+ - `"true"` will divide by the sum of the column dimension.
33
+ - `"pred"` will divide by the sum of the row dimension.
34
+ - `"all"` will divide by the sum of the full matrix
35
+ - `"none"` or `None` will apply no reduction
36
+
37
+ Returns:
38
+ Normalized confusion matrix
39
+ """
40
+ allowed_normalize = ("true", "pred", "all", "none", None)
41
+ if normalize not in allowed_normalize:
42
+ raise ValueError(f"Argument `normalize` needs to one of the following: {allowed_normalize}")
43
+ if normalize is not None and normalize != "none":
44
+ confmat = confmat.float() if not confmat.is_floating_point() else confmat
45
+ if normalize == "true":
46
+ confmat = confmat / confmat.sum(axis=-1, keepdim=True)
47
+ elif normalize == "pred":
48
+ confmat = confmat / confmat.sum(axis=-2, keepdim=True)
49
+ elif normalize == "all":
50
+ confmat = confmat / confmat.sum(axis=[-2, -1], keepdim=True)
51
+
52
+ nan_elements = confmat[torch.isnan(confmat)].nelement()
53
+ if nan_elements:
54
+ confmat[torch.isnan(confmat)] = 0
55
+ rank_zero_warn(f"{nan_elements} NaN values found in confusion matrix have been replaced with zeros.")
56
+ return confmat
57
+
58
+
59
+ def _binary_confusion_matrix_arg_validation(
60
+ threshold: float = 0.5,
61
+ ignore_index: Optional[int] = None,
62
+ normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
63
+ ) -> None:
64
+ """Validate non tensor input.
65
+
66
+ - ``threshold`` has to be a float in the [0,1] range
67
+ - ``ignore_index`` has to be None or int
68
+ - ``normalize`` has to be "true" | "pred" | "all" | "none" | None
69
+ """
70
+ if not (isinstance(threshold, float) and (0 <= threshold <= 1)):
71
+ raise ValueError(f"Expected argument `threshold` to be a float in the [0,1] range, but got {threshold}.")
72
+ if ignore_index is not None and not isinstance(ignore_index, int):
73
+ raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
74
+ allowed_normalize = ("true", "pred", "all", "none", None)
75
+ if normalize not in allowed_normalize:
76
+ raise ValueError(f"Expected argument `normalize` to be one of {allowed_normalize}, but got {normalize}.")
77
+
78
+
79
def _binary_confusion_matrix_tensor_validation(
    preds: Tensor, target: Tensor, ignore_index: Optional[int] = None
) -> None:
    """Validate tensor input.

    - tensors have to be of same shape
    - all values in target tensor that are not ignored have to be in {0, 1}
    - if pred tensor is not floating point, then all values also have to be in {0, 1}

    Raises:
        RuntimeError: if ``target`` (or an integer ``preds``) contains values outside the allowed set
    """
    # Check that they have same shape
    _check_same_shape(preds, target)

    # Check that target only contains {0,1} values or value in ignore_index
    unique_values = torch.unique(target)
    if ignore_index is None:
        check = torch.any((unique_values != 0) & (unique_values != 1))
    else:
        check = torch.any((unique_values != 0) & (unique_values != 1) & (unique_values != ignore_index))
    if check:
        # Fixed precedence bug: `[0,1] + [] if cond else [ignore_index]` parsed as
        # `([0,1] + []) if cond else [ignore_index]`, dropping {0, 1} from the message
        # whenever `ignore_index` was set.
        allowed = [0, 1] if ignore_index is None else [0, 1, ignore_index]
        raise RuntimeError(
            f"Detected the following values in `target`: {unique_values} but expected only"
            f" the following values {allowed}."
        )

    # If preds is label tensor, also check that it only contains {0,1} values
    if not preds.is_floating_point():
        unique_values = torch.unique(preds)
        if torch.any((unique_values != 0) & (unique_values != 1)):
            raise RuntimeError(
                f"Detected the following values in `preds`: {unique_values} but expected only"
                " the following values [0,1] since preds is a label tensor."
            )
111
+
112
+
113
+ def _binary_confusion_matrix_format(
114
+ preds: Tensor,
115
+ target: Tensor,
116
+ threshold: float = 0.5,
117
+ ignore_index: Optional[int] = None,
118
+ convert_to_labels: bool = True,
119
+ ) -> Tuple[Tensor, Tensor]:
120
+ """Convert all input to label format.
121
+
122
+ - Remove all datapoints that should be ignored
123
+ - If preds tensor is floating point, applies sigmoid if pred tensor not in [0,1] range
124
+ - If preds tensor is floating point, thresholds afterwards
125
+ """
126
+ preds = preds.flatten()
127
+ target = target.flatten()
128
+ if ignore_index is not None:
129
+ idx = target != ignore_index
130
+ preds = preds[idx]
131
+ target = target[idx]
132
+
133
+ if preds.is_floating_point():
134
+ if not torch.all((0 <= preds) * (preds <= 1)):
135
+ # preds is logits, convert with sigmoid
136
+ preds = preds.sigmoid()
137
+ if convert_to_labels:
138
+ preds = preds > threshold
139
+
140
+ return preds, target
141
+
142
+
143
def _binary_confusion_matrix_update(preds: Tensor, target: Tensor) -> Tensor:
    """Compute the 2x2 confusion-matrix counts for a batch of binary labels."""
    # Encode each (target, pred) pair as one index in [0, 4): tn=0, fp=1, fn=2, tp=3
    flat_index = (target * 2 + preds).to(torch.long)
    counts = _bincount(flat_index, minlength=4)
    return counts.reshape(2, 2)
148
+
149
+
150
def _binary_confusion_matrix_compute(
    confmat: Tensor, normalize: Optional[Literal["true", "pred", "all", "none"]] = None
) -> Tensor:
    """Reduce the accumulated binary confusion matrix to its final, optionally normalized, form."""
    return _confusion_matrix_reduce(confmat, normalize)
158
+
159
+
160
def binary_confusion_matrix(
    preds: Tensor,
    target: Tensor,
    threshold: float = 0.5,
    normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes the `confusion matrix`_ for binary tasks.

    Accepts the following input tensors:

    - ``preds`` (int or float tensor): ``(N, ...)``. A floating point tensor with values outside the
      [0,1] range is treated as logits and sigmoid is applied per element; floating point predictions
      are then converted to int labels by thresholding with ``threshold``.
    - ``target`` (int tensor): ``(N, ...)``

    Any additional dimensions ``...`` are flattened into the batch dimension.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        threshold: Threshold for transforming probability to binary (0,1) predictions
        normalize: Normalization mode for confusion matrix. Choose from:

            - ``None`` or ``'none'``: no normalization (default)
            - ``'true'``: normalization over the targets (most commonly used)
            - ``'pred'``: normalization over the predictions
            - ``'all'``: normalization over the whole matrix
        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        A ``[2, 2]`` tensor

    Example (preds is int tensor):
        >>> from torchmetrics.functional.classification import binary_confusion_matrix
        >>> target = torch.tensor([1, 1, 0, 0])
        >>> preds = torch.tensor([0, 1, 0, 0])
        >>> binary_confusion_matrix(preds, target)
        tensor([[2, 0],
                [1, 1]])

    Example (preds is float tensor):
        >>> from torchmetrics.functional.classification import binary_confusion_matrix
        >>> target = torch.tensor([1, 1, 0, 0])
        >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01])
        >>> binary_confusion_matrix(preds, target)
        tensor([[2, 0],
                [1, 1]])
    """
    if validate_args:
        _binary_confusion_matrix_arg_validation(threshold, ignore_index, normalize)
        _binary_confusion_matrix_tensor_validation(preds, target, ignore_index)
    preds, target = _binary_confusion_matrix_format(preds, target, threshold, ignore_index)
    # accumulate counts and apply the requested normalization in one go
    return _binary_confusion_matrix_compute(_binary_confusion_matrix_update(preds, target), normalize)
219
+
220
+
221
+ def _multiclass_confusion_matrix_arg_validation(
222
+ num_classes: int,
223
+ ignore_index: Optional[int] = None,
224
+ normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
225
+ ) -> None:
226
+ """Validate non tensor input.
227
+
228
+ - ``num_classes`` has to be a int larger than 1
229
+ - ``ignore_index`` has to be None or int
230
+ - ``normalize`` has to be "true" | "pred" | "all" | "none" | None
231
+ """
232
+ if not isinstance(num_classes, int) or num_classes < 2:
233
+ raise ValueError(f"Expected argument `num_classes` to be an integer larger than 1, but got {num_classes}")
234
+ if ignore_index is not None and not isinstance(ignore_index, int):
235
+ raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
236
+ allowed_normalize = ("true", "pred", "all", "none", None)
237
+ if normalize not in allowed_normalize:
238
+ raise ValueError(f"Expected argument `normalize` to be one of {allowed_normalize}, but got {normalize}.")
239
+
240
+
241
+ def _multiclass_confusion_matrix_tensor_validation(
242
+ preds: Tensor, target: Tensor, num_classes: int, ignore_index: Optional[int] = None
243
+ ) -> None:
244
+ """Validate tensor input.
245
+
246
+ - if target has one more dimension than preds, then all dimensions except for preds.shape[1] should match
247
+ exactly. preds.shape[1] should have size equal to number of classes
248
+ - if preds and target have same number of dims, then all dimensions should match
249
+ - all values in target tensor that are not ignored have to be {0, ..., num_classes - 1}
250
+ - if pred tensor is not floating point, then all values also have to be in {0, ..., num_classes - 1}
251
+ """
252
+ if preds.ndim == target.ndim + 1:
253
+ if not preds.is_floating_point():
254
+ raise ValueError("If `preds` have one dimension more than `target`, `preds` should be a float tensor.")
255
+ if preds.shape[1] != num_classes:
256
+ raise ValueError(
257
+ "If `preds` have one dimension more than `target`, `preds.shape[1]` should be"
258
+ " equal to number of classes."
259
+ )
260
+ if preds.shape[2:] != target.shape[1:]:
261
+ raise ValueError(
262
+ "If `preds` have one dimension more than `target`, the shape of `preds` should be"
263
+ " (N, C, ...), and the shape of `target` should be (N, ...)."
264
+ )
265
+ elif preds.ndim == target.ndim:
266
+ if preds.shape != target.shape:
267
+ raise ValueError(
268
+ "The `preds` and `target` should have the same shape,",
269
+ f" got `preds` with shape={preds.shape} and `target` with shape={target.shape}.",
270
+ )
271
+ else:
272
+ raise ValueError(
273
+ "Either `preds` and `target` both should have the (same) shape (N, ...), or `target` should be (N, ...)"
274
+ " and `preds` should be (N, C, ...)."
275
+ )
276
+
277
+ num_unique_values = len(torch.unique(target))
278
+ if ignore_index is None:
279
+ check = num_unique_values > num_classes
280
+ else:
281
+ check = num_unique_values > num_classes + 1
282
+ if check:
283
+ raise RuntimeError(
284
+ "Detected more unique values in `target` than `num_classes`. Expected only "
285
+ f"{num_classes if ignore_index is None else num_classes + 1} but found "
286
+ f"{num_unique_values} in `target`."
287
+ )
288
+
289
+ if not preds.is_floating_point():
290
+ num_unique_values = len(torch.unique(preds))
291
+ if num_unique_values > num_classes:
292
+ raise RuntimeError(
293
+ "Detected more unique values in `preds` than `num_classes`. Expected only "
294
+ f"{num_classes} but found {num_unique_values} in `preds`."
295
+ )
296
+
297
+
298
+ def _multiclass_confusion_matrix_format(
299
+ preds: Tensor,
300
+ target: Tensor,
301
+ ignore_index: Optional[int] = None,
302
+ convert_to_labels: bool = True,
303
+ ) -> Tuple[Tensor, Tensor]:
304
+ """Convert all input to label format.
305
+
306
+ - Applies argmax if preds have one more dimension than target
307
+ - Remove all datapoints that should be ignored
308
+ """
309
+ # Apply argmax if we have one more dimension
310
+ if preds.ndim == target.ndim + 1 and convert_to_labels:
311
+ preds = preds.argmax(dim=1)
312
+
313
+ if convert_to_labels:
314
+ preds = preds.flatten()
315
+ else:
316
+ preds = torch.movedim(preds, 1, -1).reshape(-1, preds.shape[1])
317
+ target = target.flatten()
318
+
319
+ if ignore_index is not None:
320
+ idx = target != ignore_index
321
+ preds = preds[idx]
322
+ target = target[idx]
323
+
324
+ return preds, target
325
+
326
+
327
def _multiclass_confusion_matrix_update(preds: Tensor, target: Tensor, num_classes: int) -> Tensor:
    """Compute the bins to update the confusion matrix with."""
    # Each (target, pred) pair maps to one cell of the C x C matrix via row-major indexing
    flat_index = target.to(torch.long) * num_classes + preds.to(torch.long)
    counts = _bincount(flat_index, minlength=num_classes**2)
    return counts.reshape(num_classes, num_classes)
332
+
333
+
334
def _multiclass_confusion_matrix_compute(
    confmat: Tensor, normalize: Optional[Literal["true", "pred", "all", "none"]] = None
) -> Tensor:
    """Reduce the accumulated multiclass confusion matrix to its final, optionally normalized, form."""
    return _confusion_matrix_reduce(confmat, normalize)
342
+
343
+
344
def multiclass_confusion_matrix(
    preds: Tensor,
    target: Tensor,
    num_classes: int,
    normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes the `confusion matrix`_ for multiclass tasks.

    Accepts the following input tensors:

    - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). A floating point tensor
      is reduced with ``torch.argmax`` along the ``C`` dimension to convert probabilities/logits
      into an int tensor.
    - ``target`` (int tensor): ``(N, ...)``

    Any additional dimensions ``...`` are flattened into the batch dimension.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_classes: Integer specifying the number of classes
        normalize: Normalization mode for confusion matrix. Choose from:

            - ``None`` or ``'none'``: no normalization (default)
            - ``'true'``: normalization over the targets (most commonly used)
            - ``'pred'``: normalization over the predictions
            - ``'all'``: normalization over the whole matrix
        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        A ``[num_classes, num_classes]`` tensor

    Example (pred is integer tensor):
        >>> from torchmetrics.functional.classification import multiclass_confusion_matrix
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([2, 1, 0, 1])
        >>> multiclass_confusion_matrix(preds, target, num_classes=3)
        tensor([[1, 1, 0],
                [0, 1, 0],
                [0, 0, 1]])

    Example (pred is float tensor):
        >>> from torchmetrics.functional.classification import multiclass_confusion_matrix
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([
        ...   [0.16, 0.26, 0.58],
        ...   [0.22, 0.61, 0.17],
        ...   [0.71, 0.09, 0.20],
        ...   [0.05, 0.82, 0.13],
        ... ])
        >>> multiclass_confusion_matrix(preds, target, num_classes=3)
        tensor([[1, 1, 0],
                [0, 1, 0],
                [0, 0, 1]])
    """
    if validate_args:
        _multiclass_confusion_matrix_arg_validation(num_classes, ignore_index, normalize)
        _multiclass_confusion_matrix_tensor_validation(preds, target, num_classes, ignore_index)
    preds, target = _multiclass_confusion_matrix_format(preds, target, ignore_index)
    # accumulate counts and apply the requested normalization in one go
    return _multiclass_confusion_matrix_compute(_multiclass_confusion_matrix_update(preds, target, num_classes), normalize)
410
+
411
+
412
+ def _multilabel_confusion_matrix_arg_validation(
413
+ num_labels: int,
414
+ threshold: float = 0.5,
415
+ ignore_index: Optional[int] = None,
416
+ normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
417
+ ) -> None:
418
+ """Validate non tensor input.
419
+
420
+ - ``num_labels`` should be an int larger than 1
421
+ - ``threshold`` has to be a float in the [0,1] range
422
+ - ``ignore_index`` has to be None or int
423
+ - ``normalize`` has to be "true" | "pred" | "all" | "none" | None
424
+ """
425
+ if not isinstance(num_labels, int) or num_labels < 2:
426
+ raise ValueError(f"Expected argument `num_labels` to be an integer larger than 1, but got {num_labels}")
427
+ if not (isinstance(threshold, float) and (0 <= threshold <= 1)):
428
+ raise ValueError(f"Expected argument `threshold` to be a float, but got {threshold}.")
429
+ if ignore_index is not None and not isinstance(ignore_index, int):
430
+ raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
431
+ allowed_normalize = ("true", "pred", "all", "none", None)
432
+ if normalize not in allowed_normalize:
433
+ raise ValueError(f"Expected argument `normalize` to be one of {allowed_normalize}, but got {normalize}.")
434
+
435
+
436
def _multilabel_confusion_matrix_tensor_validation(
    preds: Tensor, target: Tensor, num_labels: int, ignore_index: Optional[int] = None
) -> None:
    """Validate tensor input.

    - tensors have to be of same shape
    - the second dimension of both tensors need to be equal to the number of labels
    - all values in target tensor that are not ignored have to be in {0, 1}
    - if pred tensor is not floating point, then all values also have to be in {0, 1}

    Raises:
        ValueError: if the label dimension does not match ``num_labels``
        RuntimeError: if ``target`` (or an integer ``preds``) contains values outside the allowed set
    """
    # Check that they have same shape
    _check_same_shape(preds, target)

    if preds.shape[1] != num_labels:
        raise ValueError(
            "Expected both `target.shape[1]` and `preds.shape[1]` to be equal to the number of labels"
            f" but got {preds.shape[1]} and expected {num_labels}"
        )

    # Check that target only contains [0,1] values or value in ignore_index
    unique_values = torch.unique(target)
    if ignore_index is None:
        check = torch.any((unique_values != 0) & (unique_values != 1))
    else:
        check = torch.any((unique_values != 0) & (unique_values != 1) & (unique_values != ignore_index))
    if check:
        # Fixed precedence bug: `[0,1] + [] if cond else [ignore_index]` parsed as
        # `([0,1] + []) if cond else [ignore_index]`, dropping {0, 1} from the message
        # whenever `ignore_index` was set.
        allowed = [0, 1] if ignore_index is None else [0, 1, ignore_index]
        raise RuntimeError(
            f"Detected the following values in `target`: {unique_values} but expected only"
            f" the following values {allowed}."
        )

    # If preds is label tensor, also check that it only contains [0,1] values
    if not preds.is_floating_point():
        unique_values = torch.unique(preds)
        if torch.any((unique_values != 0) & (unique_values != 1)):
            raise RuntimeError(
                f"Detected the following values in `preds`: {unique_values} but expected only"
                " the following values [0,1] since preds is a label tensor."
            )
475
+
476
+
477
+ def _multilabel_confusion_matrix_format(
478
+ preds: Tensor,
479
+ target: Tensor,
480
+ num_labels: int,
481
+ threshold: float = 0.5,
482
+ ignore_index: Optional[int] = None,
483
+ should_threshold: bool = True,
484
+ ) -> Tuple[Tensor, Tensor]:
485
+ """Convert all input to label format.
486
+
487
+ - If preds tensor is floating point, applies sigmoid if pred tensor not in [0,1] range
488
+ - If preds tensor is floating point, thresholds afterwards
489
+ - Mask all elements that should be ignored with negative numbers for later filtration
490
+ """
491
+ if preds.is_floating_point():
492
+ if not torch.all((0 <= preds) * (preds <= 1)):
493
+ preds = preds.sigmoid()
494
+ if should_threshold:
495
+ preds = preds > threshold
496
+ preds = torch.movedim(preds, 1, -1).reshape(-1, num_labels)
497
+ target = torch.movedim(target, 1, -1).reshape(-1, num_labels)
498
+
499
+ if ignore_index is not None:
500
+ preds = preds.clone()
501
+ target = target.clone()
502
+ # Make sure that when we map, it will always result in a negative number that we can filter away
503
+ # Each label correspond to a 2x2 matrix = 4 elements per label
504
+ idx = target == ignore_index
505
+ preds[idx] = -4 * num_labels
506
+ target[idx] = -4 * num_labels
507
+
508
+ return preds, target
509
+
510
+
511
def _multilabel_confusion_matrix_update(preds: Tensor, target: Tensor, num_labels: int) -> Tensor:
    """Compute the bins to update the confusion matrix with."""
    # 4 matrix cells per label; ignored elements were pre-mapped to negatives and are dropped here
    label_offset = 4 * torch.arange(num_labels, device=preds.device)
    flat_index = (2 * target + preds + label_offset).flatten()
    flat_index = flat_index[flat_index >= 0]
    counts = _bincount(flat_index, minlength=4 * num_labels)
    return counts.reshape(num_labels, 2, 2)
517
+
518
+
519
def _multilabel_confusion_matrix_compute(
    confmat: Tensor, normalize: Optional[Literal["true", "pred", "all", "none"]] = None
) -> Tensor:
    """Reduce the accumulated per-label confusion matrices to their final, optionally normalized, form."""
    return _confusion_matrix_reduce(confmat, normalize)
527
+
528
+
529
def multilabel_confusion_matrix(
    preds: Tensor,
    target: Tensor,
    num_labels: int,
    threshold: float = 0.5,
    normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes the `confusion matrix`_ for multilabel tasks.

    Accepts the following input tensors:

    - ``preds`` (int or float tensor): ``(N, C, ...)``. A floating point tensor with values outside the
      [0,1] range is treated as logits and sigmoid is applied per element; floating point predictions
      are then converted to int labels by thresholding with ``threshold``.
    - ``target`` (int tensor): ``(N, C, ...)``

    Any additional dimensions ``...`` are flattened into the batch dimension.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_labels: Integer specifying the number of labels
        threshold: Threshold for transforming probability to binary (0,1) predictions
        normalize: Normalization mode for confusion matrix. Choose from:

            - ``None`` or ``'none'``: no normalization (default)
            - ``'true'``: normalization over the targets (most commonly used)
            - ``'pred'``: normalization over the predictions
            - ``'all'``: normalization over the whole matrix
        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        A ``[num_labels, 2, 2]`` tensor

    Example (preds is int tensor):
        >>> from torchmetrics.functional.classification import multilabel_confusion_matrix
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
        >>> multilabel_confusion_matrix(preds, target, num_labels=3)
        tensor([[[1, 0], [0, 1]],
                [[1, 0], [1, 0]],
                [[0, 1], [0, 1]]])

    Example (preds is float tensor):
        >>> from torchmetrics.functional.classification import multilabel_confusion_matrix
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
        >>> multilabel_confusion_matrix(preds, target, num_labels=3)
        tensor([[[1, 0], [0, 1]],
                [[1, 0], [1, 0]],
                [[0, 1], [0, 1]]])
    """
    if validate_args:
        _multilabel_confusion_matrix_arg_validation(num_labels, threshold, ignore_index, normalize)
        _multilabel_confusion_matrix_tensor_validation(preds, target, num_labels, ignore_index)
    preds, target = _multilabel_confusion_matrix_format(preds, target, num_labels, threshold, ignore_index)
    # accumulate per-label counts and apply the requested normalization in one go
    return _multilabel_confusion_matrix_compute(_multilabel_confusion_matrix_update(preds, target, num_labels), normalize)
592
+
593
+
594
def confusion_matrix(
    preds: Tensor,
    target: Tensor,
    task: Literal["binary", "multiclass", "multilabel"],
    threshold: float = 0.5,
    num_classes: Optional[int] = None,
    num_labels: Optional[int] = None,
    normalize: Optional[Literal["true", "pred", "all", "none"]] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes the `confusion matrix`_.

    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
    :func:`binary_confusion_matrix`, :func:`multiclass_confusion_matrix` and :func:`multilabel_confusion_matrix` for
    the specific details of each argument influence and examples.

    Raises:
        ValueError: if ``task`` is not one of the three supported values, or if the argument required by the
            chosen task (``num_classes`` for multiclass, ``num_labels`` for multilabel) is missing

    Legacy Example:
        >>> from torchmetrics import ConfusionMatrix
        >>> target = torch.tensor([1, 1, 0, 0])
        >>> preds = torch.tensor([0, 1, 0, 0])
        >>> confmat = ConfusionMatrix(task="binary")
        >>> confmat(preds, target)
        tensor([[2, 0],
                [1, 1]])

        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([2, 1, 0, 1])
        >>> confmat = ConfusionMatrix(task="multiclass", num_classes=3)
        >>> confmat(preds, target)
        tensor([[1, 1, 0],
                [0, 1, 0],
                [0, 0, 1]])

        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
        >>> confmat = ConfusionMatrix(task="multilabel", num_labels=3)
        >>> confmat(preds, target)
        tensor([[[1, 0], [0, 1]],
                [[1, 0], [1, 0]],
                [[0, 1], [0, 1]]])
    """
    if task == "binary":
        return binary_confusion_matrix(preds, target, threshold, normalize, ignore_index, validate_args)
    if task == "multiclass":
        # `assert` replaced with an explicit error: asserts are stripped under `python -O`
        # and a bare AssertionError gives the user no hint about the missing argument.
        if not isinstance(num_classes, int):
            raise ValueError(
                f"Expected argument `num_classes` to be an integer when `task='multiclass'`, but got {num_classes}"
            )
        return multiclass_confusion_matrix(preds, target, num_classes, normalize, ignore_index, validate_args)
    if task == "multilabel":
        if not isinstance(num_labels, int):
            raise ValueError(
                f"Expected argument `num_labels` to be an integer when `task='multilabel'`, but got {num_labels}"
            )
        return multilabel_confusion_matrix(preds, target, num_labels, threshold, normalize, ignore_index, validate_args)
    raise ValueError(
        f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
    )
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/dice.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Optional
15
+
16
+ import torch
17
+ from torch import Tensor
18
+
19
+ from torchmetrics.functional.classification.stat_scores import _reduce_stat_scores, _stat_scores_update
20
+ from torchmetrics.utilities.checks import _input_squeeze
21
+ from torchmetrics.utilities.enums import AverageMethod, MDMCAverageMethod
22
+
23
+
24
def _dice_compute(
    tp: Tensor,
    fp: Tensor,
    fn: Tensor,
    average: Optional[str],
    mdmc_average: Optional[str],
    zero_division: int = 0,
) -> Tensor:
    """Compute the dice score from the stat scores: true positives, false positives, false negatives.

    Args:
        tp: True positives
        fp: False positives
        fn: False negatives
        average: Defines the reduction that is applied
        mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
            ``average`` parameter)
        zero_division: The value to use for the score when the denominator is zero
    """
    # dice = 2*tp / (2*tp + fp + fn)
    numerator, denominator = 2 * tp, 2 * tp + fp + fn

    samplewise = mdmc_average == MDMCAverageMethod.SAMPLEWISE
    if average == AverageMethod.MACRO and not samplewise:
        # drop classes with no statistics at all so they do not drag down the macro mean
        present = (tp + fp + fn) != 0
        numerator = numerator[present]
        denominator = denominator[present]

    if average == AverageMethod.NONE and not samplewise:
        # a class is absent when it has no TPs, no FPs and no FNs; mark it with -1 so the
        # reduction helper can map it to NaN in the per-class output
        absent_idx = torch.nonzero((tp | fn | fp) == 0).cpu()
        numerator[absent_idx, ...] = -1
        denominator[absent_idx, ...] = -1

    return _reduce_stat_scores(
        numerator=numerator,
        denominator=denominator,
        weights=None if average != "weighted" else tp + fn,
        average=average,
        mdmc_average=mdmc_average,
        zero_division=zero_division,
    )
64
+
65
+
66
+ def dice(
67
+ preds: Tensor,
68
+ target: Tensor,
69
+ zero_division: int = 0,
70
+ average: Optional[str] = "micro",
71
+ mdmc_average: Optional[str] = "global",
72
+ threshold: float = 0.5,
73
+ top_k: Optional[int] = None,
74
+ num_classes: Optional[int] = None,
75
+ multiclass: Optional[bool] = None,
76
+ ignore_index: Optional[int] = None,
77
+ ) -> Tensor:
78
+ r"""Computes `Dice`_:
79
+
80
+ .. math:: \text{Dice} = \frac{\text{2 * TP}}{\text{2 * TP} + \text{FP} + \text{FN}}
81
+
82
+ Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
83
+ false negatives respecitively.
84
+
85
+ It is recommend set `ignore_index` to index of background class.
86
+
87
+ The reduction method (how the recall scores are aggregated) is controlled by the
88
+ ``average`` parameter, and additionally by the ``mdmc_average`` parameter in the
89
+ multi-dimensional multi-class case.
90
+
91
+ Args:
92
+ preds: Predictions from model (probabilities, logits or labels)
93
+ target: Ground truth values
94
+ zero_division: The value to use for the score if denominator equals zero
95
+ average:
96
+ Defines the reduction that is applied. Should be one of the following:
97
+
98
+ - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes.
99
+ - ``'macro'``: Calculate the metric for each class separately, and average the
100
+ metrics across classes (with equal weights for each class).
101
+ - ``'weighted'``: Calculate the metric for each class separately, and average the
102
+ metrics across classes, weighting each class by its support (``tp + fn``).
103
+ - ``'none'`` or ``None``: Calculate the metric for each class separately, and return
104
+ the metric for every class.
105
+ - ``'samples'``: Calculate the metric for each sample, and average the metrics
106
+ across samples (with equal weights for each sample).
107
+
108
+ .. note:: What is considered a sample in the multi-dimensional multi-class case
109
+ depends on the value of ``mdmc_average``.
110
+
111
+ .. note:: If ``'none'`` and a given class doesn't occur in the ``preds`` or ``target``,
112
+ the value for the class will be ``nan``.
113
+
114
+ mdmc_average:
115
+ Defines how averaging is done for multi-dimensional multi-class inputs (on top of the
116
+ ``average`` parameter). Should be one of the following:
117
+
118
+ - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional
119
+ multi-class.
120
+
121
+ - ``'samplewise'``: In this case, the statistics are computed separately for each
122
+ sample on the ``N`` axis, and then averaged over samples.
123
+ The computation for each sample is done by treating the flattened extra axes ``...``
124
+ as the ``N`` dimension within the sample,
125
+ and computing the metric for the sample based on that.
126
+
127
+ - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs
128
+ are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they
129
+ were ``(N_X, C)``. From here on the ``average`` parameter applies as usual.
130
+
131
+ ignore_index:
132
+ Integer specifying a target class to ignore. If given, this class index does not contribute
133
+ to the returned score, regardless of reduction method. If an index is ignored, and ``average=None``
134
+ or ``'none'``, the score for the ignored class will be returned as ``nan``.
135
+
136
+ num_classes:
137
+ Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods.
138
+
139
+ threshold:
140
+ Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
141
+ of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities.
142
+ top_k:
143
+ Number of the highest probability or logit score predictions considered finding the correct label,
144
+ relevant only for (multi-dimensional) multi-class inputs. The
145
+ default value (``None``) will be interpreted as 1 for these inputs.
146
+
147
+ Should be left at default (``None``) for all other types of inputs.
148
+ multiclass:
149
+ Used only in certain special cases, where you want to treat inputs as a different type
150
+ than what they appear to be.
151
+
152
+ Return:
153
+ The shape of the returned tensor depends on the ``average`` parameter
154
+
155
+ - If ``average in ['micro', 'macro', 'weighted', 'samples']``, a one-element tensor will be returned
156
+ - If ``average in ['none', None]``, the shape will be ``(C,)``, where ``C`` stands for the number of classes
157
+
158
+ Raises:
159
+ ValueError:
160
+ If ``average`` is not one of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"`` or ``None``
161
+ ValueError:
162
+ If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``.
163
+ ValueError:
164
+ If ``average`` is set but ``num_classes`` is not provided.
165
+ ValueError:
166
+ If ``num_classes`` is set and ``ignore_index`` is not in the range ``[0, num_classes)``.
167
+
168
+ Example:
169
+ >>> from torchmetrics.functional import dice
170
+ >>> preds = torch.tensor([2, 0, 2, 1])
171
+ >>> target = torch.tensor([1, 1, 2, 0])
172
+ >>> dice(preds, target, average='micro')
173
+ tensor(0.2500)
174
+ """
175
+ allowed_average = ("micro", "macro", "weighted", "samples", "none", None)
176
+ if average not in allowed_average:
177
+ raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")
178
+
179
+ if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1):
180
+ raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.")
181
+
182
+ allowed_mdmc_average = [None, "samplewise", "global"]
183
+ if mdmc_average not in allowed_mdmc_average:
184
+ raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.")
185
+
186
+ if num_classes and ignore_index is not None and (not ignore_index < num_classes or num_classes == 1):
187
+ raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes")
188
+
189
+ if top_k is not None and (not isinstance(top_k, int) or top_k <= 0):
190
+ raise ValueError(f"The `top_k` should be an integer larger than 0, got {top_k}")
191
+
192
+ preds, target = _input_squeeze(preds, target)
193
+ reduce = "macro" if average in ("weighted", "none", None) else average
194
+
195
+ tp, fp, _, fn = _stat_scores_update(
196
+ preds,
197
+ target,
198
+ reduce=reduce,
199
+ mdmc_reduce=mdmc_average,
200
+ threshold=threshold,
201
+ num_classes=num_classes,
202
+ top_k=top_k,
203
+ multiclass=multiclass,
204
+ ignore_index=ignore_index,
205
+ )
206
+
207
+ return _dice_compute(tp, fp, fn, average, mdmc_average, zero_division)
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/exact_match.py ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Optional, Tuple
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.functional.classification.stat_scores import (
21
+ _multiclass_stat_scores_arg_validation,
22
+ _multiclass_stat_scores_format,
23
+ _multiclass_stat_scores_tensor_validation,
24
+ _multilabel_stat_scores_arg_validation,
25
+ _multilabel_stat_scores_format,
26
+ _multilabel_stat_scores_tensor_validation,
27
+ )
28
+ from torchmetrics.utilities.compute import _safe_divide
29
+
30
+
31
+ def _exact_match_reduce(
32
+ correct: Tensor,
33
+ total: Tensor,
34
+ ) -> Tensor:
35
+ """Final reduction for exact match."""
36
+ return _safe_divide(correct, total)
37
+
38
+
39
+ def _multiclass_exact_match_update(
40
+ preds: Tensor,
41
+ target: Tensor,
42
+ multidim_average: Literal["global", "samplewise"] = "global",
43
+ ) -> Tuple[Tensor, Tensor]:
44
+ """Computes the statistics."""
45
+ correct = (preds == target).sum(1) == preds.shape[1]
46
+ correct = correct if multidim_average == "samplewise" else correct.sum()
47
+ total = torch.tensor(preds.shape[0] if multidim_average == "global" else 1, device=correct.device)
48
+ return correct, total
49
+
50
+
51
+ def multiclass_exact_match(
52
+ preds: Tensor,
53
+ target: Tensor,
54
+ num_classes: int,
55
+ multidim_average: Literal["global", "samplewise"] = "global",
56
+ ignore_index: Optional[int] = None,
57
+ validate_args: bool = True,
58
+ ) -> Tensor:
59
+ r"""Computes Exact match (also known as subset accuracy) for multiclass tasks. Exact Match is a stricter version
60
+ of accuracy where all labels have to match exactly for the sample to be correctly classified.
61
+
62
+ Accepts the following input tensors:
63
+
64
+ - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point
65
+ we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
66
+ an int tensor.
67
+ - ``target`` (int tensor): ``(N, ...)``
68
+
69
+ Args:
70
+ preds: Tensor with predictions
71
+ target: Tensor with true labels
72
+ num_classes: Integer specifing the number of labels
73
+ multidim_average:
74
+ Defines how additionally dimensions ``...`` should be handled. Should be one of the following:
75
+
76
+ - ``global``: Additional dimensions are flatted along the batch dimension
77
+ - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
78
+ The statistics in this case are calculated over the additional dimensions.
79
+
80
+ ignore_index:
81
+ Specifies a target value that is ignored and does not contribute to the metric calculation
82
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
83
+ Set to ``False`` for faster computations.
84
+
85
+ Returns:
86
+ The returned shape depends on the ``multidim_average`` argument:
87
+
88
+ - If ``multidim_average`` is set to ``global`` the output will be a scalar tensor
89
+ - If ``multidim_average`` is set to ``samplewise`` the output will be a tensor of shape ``(N,)``
90
+
91
+ Example (multidim tensors):
92
+ >>> from torchmetrics.functional.classification import multiclass_exact_match
93
+ >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
94
+ >>> preds = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
95
+ >>> multiclass_exact_match(preds, target, num_classes=3, multidim_average='global')
96
+ tensor(0.5000)
97
+
98
+ Example (multidim tensors):
99
+ >>> from torchmetrics.functional.classification import multiclass_exact_match
100
+ >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
101
+ >>> preds = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
102
+ >>> multiclass_exact_match(preds, target, num_classes=3, multidim_average='samplewise')
103
+ tensor([1., 0.])
104
+ """
105
+ top_k, average = 1, None
106
+ if validate_args:
107
+ _multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index)
108
+ _multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index)
109
+ preds, target = _multiclass_stat_scores_format(preds, target, top_k)
110
+ correct, total = _multiclass_exact_match_update(preds, target, multidim_average)
111
+ return _exact_match_reduce(correct, total)
112
+
113
+
114
+ def _multilabel_exact_match_update(
115
+ preds: Tensor, target: Tensor, num_labels: int, multidim_average: Literal["global", "samplewise"] = "global"
116
+ ) -> Tuple[Tensor, Tensor]:
117
+ """Computes the statistics."""
118
+ if multidim_average == "global":
119
+ preds = torch.movedim(preds, 1, -1).reshape(-1, num_labels)
120
+ target = torch.movedim(target, 1, -1).reshape(-1, num_labels)
121
+
122
+ correct = ((preds == target).sum(1) == num_labels).sum(dim=-1)
123
+ total = torch.tensor(preds.shape[0 if multidim_average == "global" else 2], device=correct.device)
124
+ return correct, total
125
+
126
+
127
+ def multilabel_exact_match(
128
+ preds: Tensor,
129
+ target: Tensor,
130
+ num_labels: int,
131
+ threshold: float = 0.5,
132
+ multidim_average: Literal["global", "samplewise"] = "global",
133
+ ignore_index: Optional[int] = None,
134
+ validate_args: bool = True,
135
+ ) -> Tensor:
136
+ r"""Computes Exact match (also known as subset accuracy) for multilabel tasks. Exact Match is a stricter version
137
+ of accuracy where all labels have to match exactly for the sample to be correctly classified.
138
+
139
+ Accepts the following input tensors:
140
+
141
+ - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
142
+ [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally,
143
+ we convert to int tensor with thresholding using the value in ``threshold``.
144
+ - ``target`` (int tensor): ``(N, C, ...)``
145
+
146
+ Args:
147
+ preds: Tensor with predictions
148
+ target: Tensor with true labels
149
+ num_labels: Integer specifing the number of labels
150
+ threshold: Threshold for transforming probability to binary (0,1) predictions
151
+ multidim_average:
152
+ Defines how additionally dimensions ``...`` should be handled. Should be one of the following:
153
+
154
+ - ``global``: Additional dimensions are flatted along the batch dimension
155
+ - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
156
+ The statistics in this case are calculated over the additional dimensions.
157
+
158
+ ignore_index:
159
+ Specifies a target value that is ignored and does not contribute to the metric calculation
160
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
161
+ Set to ``False`` for faster computations.
162
+
163
+ Returns:
164
+ The returned shape depends on the ``multidim_average`` argument:
165
+
166
+ - If ``multidim_average`` is set to ``global`` the output will be a scalar tensor
167
+ - If ``multidim_average`` is set to ``samplewise`` the output will be a tensor of shape ``(N,)``
168
+
169
+ Example (preds is int tensor):
170
+ >>> from torchmetrics.functional.classification import multilabel_exact_match
171
+ >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
172
+ >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
173
+ >>> multilabel_exact_match(preds, target, num_labels=3)
174
+ tensor(0.5000)
175
+
176
+ Example (preds is float tensor):
177
+ >>> from torchmetrics.functional.classification import multilabel_exact_match
178
+ >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
179
+ >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
180
+ >>> multilabel_exact_match(preds, target, num_labels=3)
181
+ tensor(0.5000)
182
+
183
+ Example (multidim tensors):
184
+ >>> from torchmetrics.functional.classification import multilabel_exact_match
185
+ >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
186
+ >>> preds = torch.tensor(
187
+ ... [
188
+ ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
189
+ ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
190
+ ... ]
191
+ ... )
192
+ >>> multilabel_exact_match(preds, target, num_labels=3, multidim_average='samplewise')
193
+ tensor([0., 0.])
194
+ """
195
+ average = None
196
+ if validate_args:
197
+ _multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index)
198
+ _multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index)
199
+ preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index)
200
+ correct, total = _multilabel_exact_match_update(preds, target, num_labels, multidim_average)
201
+ return _exact_match_reduce(correct, total)
202
+
203
+
204
+ def exact_match(
205
+ preds: Tensor,
206
+ target: Tensor,
207
+ task: Literal["multiclass", "multilabel"],
208
+ num_classes: Optional[int] = None,
209
+ num_labels: Optional[int] = None,
210
+ threshold: float = 0.5,
211
+ multidim_average: Literal["global", "samplewise"] = "global",
212
+ ignore_index: Optional[int] = None,
213
+ validate_args: bool = True,
214
+ ) -> Tensor:
215
+ r"""Computes Exact match (also known as subset accuracy). Exact Match is a stricter version of accuracy where
216
+ all classes/labels have to match exactly for the sample to be correctly classified.
217
+
218
+ This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
219
+ ``task`` argument to either ``'multiclass'`` or ``'multilabel'``. See the documentation of
220
+ :func:`multiclass_exact_match` and :func:`multilabel_exact_match` for the specific details of
221
+ each argument influence and examples.
222
+ Legacy Example:
223
+ >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
224
+ >>> preds = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
225
+ >>> exact_match(preds, target, task="multiclass", num_classes=3, multidim_average='global')
226
+ tensor(0.5000)
227
+
228
+ >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
229
+ >>> preds = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[2, 2], [2, 1], [1, 0]]])
230
+ >>> exact_match(preds, target, task="multiclass", num_classes=3, multidim_average='samplewise')
231
+ tensor([1., 0.])
232
+ """
233
+ if task == "multiclass":
234
+ assert num_classes is not None
235
+ return multiclass_exact_match(preds, target, num_classes, multidim_average, ignore_index, validate_args)
236
+ if task == "multilabel":
237
+ assert num_labels is not None
238
+ return multilabel_exact_match(
239
+ preds, target, num_labels, threshold, multidim_average, ignore_index, validate_args
240
+ )
241
+ raise ValueError(f"Expected argument `task` to either be `'multiclass'` or `'multilabel'` but got {task}")
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/f_beta.py ADDED
@@ -0,0 +1,775 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Optional
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.functional.classification.stat_scores import (
21
+ _binary_stat_scores_arg_validation,
22
+ _binary_stat_scores_format,
23
+ _binary_stat_scores_tensor_validation,
24
+ _binary_stat_scores_update,
25
+ _multiclass_stat_scores_arg_validation,
26
+ _multiclass_stat_scores_format,
27
+ _multiclass_stat_scores_tensor_validation,
28
+ _multiclass_stat_scores_update,
29
+ _multilabel_stat_scores_arg_validation,
30
+ _multilabel_stat_scores_format,
31
+ _multilabel_stat_scores_tensor_validation,
32
+ _multilabel_stat_scores_update,
33
+ )
34
+ from torchmetrics.utilities.compute import _safe_divide
35
+
36
+
37
+ def _fbeta_reduce(
38
+ tp: Tensor,
39
+ fp: Tensor,
40
+ tn: Tensor,
41
+ fn: Tensor,
42
+ beta: float,
43
+ average: Optional[Literal["binary", "micro", "macro", "weighted", "none"]],
44
+ multidim_average: Literal["global", "samplewise"] = "global",
45
+ ) -> Tensor:
46
+ beta2 = beta**2
47
+ if average == "binary":
48
+ return _safe_divide((1 + beta2) * tp, (1 + beta2) * tp + beta2 * fn + fp)
49
+ elif average == "micro":
50
+ tp = tp.sum(dim=0 if multidim_average == "global" else 1)
51
+ fn = fn.sum(dim=0 if multidim_average == "global" else 1)
52
+ fp = fp.sum(dim=0 if multidim_average == "global" else 1)
53
+ return _safe_divide((1 + beta2) * tp, (1 + beta2) * tp + beta2 * fn + fp)
54
+ else:
55
+ fbeta_score = _safe_divide((1 + beta2) * tp, (1 + beta2) * tp + beta2 * fn + fp)
56
+ if average is None or average == "none":
57
+ return fbeta_score
58
+ if average == "weighted":
59
+ weights = tp + fn
60
+ else:
61
+ weights = torch.ones_like(fbeta_score)
62
+ return _safe_divide(weights * fbeta_score, weights.sum(-1, keepdim=True)).sum(-1)
63
+
64
+
65
+ def _binary_fbeta_score_arg_validation(
66
+ beta: float,
67
+ threshold: float = 0.5,
68
+ multidim_average: Literal["global", "samplewise"] = "global",
69
+ ignore_index: Optional[int] = None,
70
+ ) -> None:
71
+ if not (isinstance(beta, float) and beta > 0):
72
+ raise ValueError(f"Expected argument `beta` to be a float larger than 0, but got {beta}.")
73
+ _binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index)
74
+
75
+
76
+ def binary_fbeta_score(
77
+ preds: Tensor,
78
+ target: Tensor,
79
+ beta: float,
80
+ threshold: float = 0.5,
81
+ multidim_average: Literal["global", "samplewise"] = "global",
82
+ ignore_index: Optional[int] = None,
83
+ validate_args: bool = True,
84
+ ) -> Tensor:
85
+ r"""Computes `F-score`_ metric for binary tasks:
86
+
87
+ .. math::
88
+ F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
89
+ {(\beta^2 * \text{precision}) + \text{recall}}
90
+
91
+ Accepts the following input tensors:
92
+
93
+ - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
94
+ [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally,
95
+ we convert to int tensor with thresholding using the value in ``threshold``.
96
+ - ``target`` (int tensor): ``(N, ...)``
97
+
98
+ Args:
99
+ preds: Tensor with predictions
100
+ target: Tensor with true labels
101
+ beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight
102
+ threshold: Threshold for transforming probability to binary {0,1} predictions
103
+ multidim_average:
104
+ Defines how additionally dimensions ``...`` should be handled. Should be one of the following:
105
+
106
+ - ``global``: Additional dimensions are flatted along the batch dimension
107
+ - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
108
+ The statistics in this case are calculated over the additional dimensions.
109
+
110
+ ignore_index:
111
+ Specifies a target value that is ignored and does not contribute to the metric calculation
112
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
113
+ Set to ``False`` for faster computations.
114
+
115
+ Returns:
116
+ If ``multidim_average`` is set to ``global``, the metric returns a scalar value. If ``multidim_average``
117
+ is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample.
118
+
119
+ Example (preds is int tensor):
120
+ >>> from torchmetrics.functional.classification import binary_fbeta_score
121
+ >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
122
+ >>> preds = torch.tensor([0, 0, 1, 1, 0, 1])
123
+ >>> binary_fbeta_score(preds, target, beta=2.0)
124
+ tensor(0.6667)
125
+
126
+ Example (preds is float tensor):
127
+ >>> from torchmetrics.functional.classification import binary_fbeta_score
128
+ >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
129
+ >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
130
+ >>> binary_fbeta_score(preds, target, beta=2.0)
131
+ tensor(0.6667)
132
+
133
+ Example (multidim tensors):
134
+ >>> from torchmetrics.functional.classification import binary_fbeta_score
135
+ >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
136
+ >>> preds = torch.tensor(
137
+ ... [
138
+ ... [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
139
+ ... [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
140
+ ... ]
141
+ ... )
142
+ >>> binary_fbeta_score(preds, target, beta=2.0, multidim_average='samplewise')
143
+ tensor([0.5882, 0.0000])
144
+ """
145
+ if validate_args:
146
+ _binary_fbeta_score_arg_validation(beta, threshold, multidim_average, ignore_index)
147
+ _binary_stat_scores_tensor_validation(preds, target, multidim_average, ignore_index)
148
+ preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index)
149
+ tp, fp, tn, fn = _binary_stat_scores_update(preds, target, multidim_average)
150
+ return _fbeta_reduce(tp, fp, tn, fn, beta, average="binary", multidim_average=multidim_average)
151
+
152
+
153
+ def _multiclass_fbeta_score_arg_validation(
154
+ beta: float,
155
+ num_classes: int,
156
+ top_k: int = 1,
157
+ average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
158
+ multidim_average: Literal["global", "samplewise"] = "global",
159
+ ignore_index: Optional[int] = None,
160
+ ) -> None:
161
+ if not (isinstance(beta, float) and beta > 0):
162
+ raise ValueError(f"Expected argument `beta` to be a float larger than 0, but got {beta}.")
163
+ _multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index)
164
+
165
+
166
+ def multiclass_fbeta_score(
167
+ preds: Tensor,
168
+ target: Tensor,
169
+ beta: float,
170
+ num_classes: int,
171
+ average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
172
+ top_k: int = 1,
173
+ multidim_average: Literal["global", "samplewise"] = "global",
174
+ ignore_index: Optional[int] = None,
175
+ validate_args: bool = True,
176
+ ) -> Tensor:
177
+ r"""Computes `F-score`_ metric for multiclass tasks:
178
+
179
+ .. math::
180
+ F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
181
+ {(\beta^2 * \text{precision}) + \text{recall}}
182
+
183
+ Accepts the following input tensors:
184
+
185
+ - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point
186
+ we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
187
+ an int tensor.
188
+ - ``target`` (int tensor): ``(N, ...)``
189
+
190
+ Args:
191
+ preds: Tensor with predictions
192
+ target: Tensor with true labels
193
+ beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight
194
+ num_classes: Integer specifing the number of classes
195
+ average:
196
+ Defines the reduction that is applied over labels. Should be one of the following:
197
+
198
+ - ``micro``: Sum statistics over all labels
199
+ - ``macro``: Calculate statistics for each label and average them
200
+ - ``weighted``: Calculates statistics for each label and computes weighted average using their support
201
+ - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction
202
+ top_k:
203
+ Number of highest probability or logit score predictions considered to find the correct label.
204
+ Only works when ``preds`` contain probabilities/logits.
205
+ multidim_average:
206
+ Defines how additionally dimensions ``...`` should be handled. Should be one of the following:
207
+
208
+ - ``global``: Additional dimensions are flatted along the batch dimension
209
+ - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
210
+ The statistics in this case are calculated over the additional dimensions.
211
+
212
+ ignore_index:
213
+ Specifies a target value that is ignored and does not contribute to the metric calculation
214
+ validate_args: bool indicating if input arguments and tensors should be validated for correctness.
215
+ Set to ``False`` for faster computations.
216
+
217
+ Returns:
218
+ The returned shape depends on the ``average`` and ``multidim_average`` arguments:
219
+
220
+ - If ``multidim_average`` is set to ``global``:
221
+
222
+ - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
223
+ - If ``average=None/'none'``, the shape will be ``(C,)``
224
+
225
+ - If ``multidim_average`` is set to ``samplewise``:
226
+
227
+ - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
228
+ - If ``average=None/'none'``, the shape will be ``(N, C)``
229
+
230
+ Example (preds is int tensor):
231
+ >>> from torchmetrics.functional.classification import multiclass_fbeta_score
232
+ >>> target = torch.tensor([2, 1, 0, 0])
233
+ >>> preds = torch.tensor([2, 1, 0, 1])
234
+ >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3)
235
+ tensor(0.7963)
236
+ >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3, average=None)
237
+ tensor([0.5556, 0.8333, 1.0000])
238
+
239
+ Example (preds is float tensor):
240
+ >>> from torchmetrics.functional.classification import multiclass_fbeta_score
241
+ >>> target = torch.tensor([2, 1, 0, 0])
242
+ >>> preds = torch.tensor([
243
+ ... [0.16, 0.26, 0.58],
244
+ ... [0.22, 0.61, 0.17],
245
+ ... [0.71, 0.09, 0.20],
246
+ ... [0.05, 0.82, 0.13],
247
+ ... ])
248
+ >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3)
249
+ tensor(0.7963)
250
+ >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3, average=None)
251
+ tensor([0.5556, 0.8333, 1.0000])
252
+
253
+ Example (multidim tensors):
254
+ >>> from torchmetrics.functional.classification import multiclass_fbeta_score
255
+ >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
256
+ >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
257
+ >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3, multidim_average='samplewise')
258
+ tensor([0.4697, 0.2706])
259
+ >>> multiclass_fbeta_score(preds, target, beta=2.0, num_classes=3, multidim_average='samplewise', average=None)
260
+ tensor([[0.9091, 0.0000, 0.5000],
261
+ [0.0000, 0.3571, 0.4545]])
262
+ """
263
+ if validate_args:
264
+ _multiclass_fbeta_score_arg_validation(beta, num_classes, top_k, average, multidim_average, ignore_index)
265
+ _multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index)
266
+ preds, target = _multiclass_stat_scores_format(preds, target, top_k)
267
+ tp, fp, tn, fn = _multiclass_stat_scores_update(
268
+ preds, target, num_classes, top_k, average, multidim_average, ignore_index
269
+ )
270
+ return _fbeta_reduce(tp, fp, tn, fn, beta, average=average, multidim_average=multidim_average)
271
+
272
+
273
+ def _multilabel_fbeta_score_arg_validation(
274
+ beta: float,
275
+ num_labels: int,
276
+ threshold: float = 0.5,
277
+ average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
278
+ multidim_average: Literal["global", "samplewise"] = "global",
279
+ ignore_index: Optional[int] = None,
280
+ ) -> None:
281
+ if not (isinstance(beta, float) and beta > 0):
282
+ raise ValueError(f"Expected argument `beta` to be a float larger than 0, but got {beta}.")
283
+ _multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index)
284
+
285
+
286
def multilabel_fbeta_score(
    preds: Tensor,
    target: Tensor,
    beta: float,
    num_labels: int,
    threshold: float = 0.5,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes `F-score`_ metric for multilabel tasks:

    .. math::
        F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
        {(\beta^2 * \text{precision}) + \text{recall}}

    Accepts an int or float ``preds`` tensor of shape ``(N, C, ...)`` (float values outside the
    [0,1] range are treated as logits: sigmoid is applied per element, then the result is
    thresholded with ``threshold``) and an int ``target`` tensor of shape ``(N, C, ...)``.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        beta: Weighting between precision and recall; ``beta=1`` weighs them equally
        num_labels: Integer specifing the number of labels
        threshold: Threshold for transforming probability to binary (0,1) predictions
        average: Reduction over labels: ``micro`` (sum statistics over all labels), ``macro``
            (unweighted mean over labels), ``weighted`` (support-weighted mean) or
            ``"none"``/``None`` (no reduction)
        multidim_average: ``global`` flattens extra dimensions into the batch dimension;
            ``samplewise`` computes one statistic per sample along ``N`` over the extra dimensions
        ignore_index: Target value that is ignored in the metric computation
        validate_args: If ``True``, validate arguments and tensors (set ``False`` for speed)

    Returns:
        A scalar tensor for ``average`` in ``micro``/``macro``/``weighted`` with
        ``multidim_average='global'``; shape ``(C,)`` for ``average=None/'none'``. With
        ``multidim_average='samplewise'`` the shapes are ``(N,)`` and ``(N, C)`` respectively.

    Example (preds is int tensor):
        >>> from torchmetrics.functional.classification import multilabel_fbeta_score
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
        >>> multilabel_fbeta_score(preds, target, beta=2.0, num_labels=3)
        tensor(0.6111)
        >>> multilabel_fbeta_score(preds, target, beta=2.0, num_labels=3, average=None)
        tensor([1.0000, 0.0000, 0.8333])

    Example (preds is float tensor):
        >>> from torchmetrics.functional.classification import multilabel_fbeta_score
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
        >>> multilabel_fbeta_score(preds, target, beta=2.0, num_labels=3)
        tensor(0.6111)
        >>> multilabel_fbeta_score(preds, target, beta=2.0, num_labels=3, average=None)
        tensor([1.0000, 0.0000, 0.8333])

    Example (multidim tensors):
        >>> from torchmetrics.functional.classification import multilabel_fbeta_score
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> multilabel_fbeta_score(preds, target, num_labels=3, beta=2.0, multidim_average='samplewise')
        tensor([0.5556, 0.0000])
        >>> multilabel_fbeta_score(preds, target, num_labels=3, beta=2.0, multidim_average='samplewise', average=None)
        tensor([[0.8333, 0.8333, 0.0000],
                [0.0000, 0.0000, 0.0000]])
    """
    if validate_args:
        _multilabel_fbeta_score_arg_validation(beta, num_labels, threshold, average, multidim_average, ignore_index)
        _multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index)
    # Normalize inputs (sigmoid + threshold on float preds), then accumulate the confusion counts.
    formatted_preds, formatted_target = _multilabel_stat_scores_format(
        preds, target, num_labels, threshold, ignore_index
    )
    stats = _multilabel_stat_scores_update(formatted_preds, formatted_target, multidim_average)
    return _fbeta_reduce(*stats, beta, average=average, multidim_average=multidim_average)
388
+
389
+
390
def binary_f1_score(
    preds: Tensor,
    target: Tensor,
    threshold: float = 0.5,
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes F-1 score for binary tasks:

    .. math::
        F_{1} = 2\frac{\text{precision} * \text{recall}}{(\text{precision}) + \text{recall}}

    The F-1 score is the harmonic mean of precision and recall, i.e. ``binary_fbeta_score``
    with ``beta=1.0``. Accepts an int or float ``preds`` tensor of shape ``(N, ...)`` (float
    values outside the [0,1] range are treated as logits: sigmoid is applied, then the result
    is thresholded with ``threshold``) and an int ``target`` tensor of shape ``(N, ...)``.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        threshold: Threshold for transforming probability to binary {0,1} predictions
        multidim_average: ``global`` flattens extra dimensions into the batch dimension;
            ``samplewise`` computes one statistic per sample along ``N`` over the extra dimensions
        ignore_index: Target value that is ignored in the metric computation
        validate_args: If ``True``, validate arguments and tensors (set ``False`` for speed)

    Returns:
        A scalar tensor when ``multidim_average='global'``; a ``(N,)`` tensor with one value per
        sample when ``multidim_average='samplewise'``.

    Example (preds is int tensor):
        >>> from torchmetrics.functional.classification import binary_f1_score
        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
        >>> preds = torch.tensor([0, 0, 1, 1, 0, 1])
        >>> binary_f1_score(preds, target)
        tensor(0.6667)

    Example (preds is float tensor):
        >>> from torchmetrics.functional.classification import binary_f1_score
        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
        >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
        >>> binary_f1_score(preds, target)
        tensor(0.6667)

    Example (multidim tensors):
        >>> from torchmetrics.functional.classification import binary_f1_score
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> binary_f1_score(preds, target, multidim_average='samplewise')
        tensor([0.5000, 0.0000])
    """
    # F-1 is exactly F-beta with precision and recall weighted equally.
    return binary_fbeta_score(
        preds,
        target,
        1.0,
        threshold=threshold,
        multidim_average=multidim_average,
        ignore_index=ignore_index,
        validate_args=validate_args,
    )
465
+
466
+
467
def multiclass_f1_score(
    preds: Tensor,
    target: Tensor,
    num_classes: int,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
    top_k: int = 1,
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes F-1 score for multiclass tasks:

    .. math::
        F_{1} = 2\frac{\text{precision} * \text{recall}}{(\text{precision}) + \text{recall}}

    Equivalent to :func:`multiclass_fbeta_score` with ``beta=1.0``. Accepts either an int
    ``preds`` tensor of shape ``(N, ...)`` or a float tensor of shape ``(N, C, ...)`` (converted
    to class labels via ``torch.argmax`` over the ``C`` dimension), plus an int ``target`` tensor
    of shape ``(N, ...)``.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_classes: Integer specifing the number of classes
        average: Reduction over labels: ``micro`` (sum statistics over all labels), ``macro``
            (unweighted mean over labels), ``weighted`` (support-weighted mean) or
            ``"none"``/``None`` (no reduction)
        top_k: Number of highest probability or logit score predictions considered to find the
            correct label. Only works when ``preds`` contain probabilities/logits.
        multidim_average: ``global`` flattens extra dimensions into the batch dimension;
            ``samplewise`` computes one statistic per sample along ``N`` over the extra dimensions
        ignore_index: Target value that is ignored in the metric computation
        validate_args: If ``True``, validate arguments and tensors (set ``False`` for speed)

    Returns:
        A scalar tensor for ``average`` in ``micro``/``macro``/``weighted`` with
        ``multidim_average='global'``; shape ``(C,)`` for ``average=None/'none'``. With
        ``multidim_average='samplewise'`` the shapes are ``(N,)`` and ``(N, C)`` respectively.

    Example (preds is int tensor):
        >>> from torchmetrics.functional.classification import multiclass_f1_score
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([2, 1, 0, 1])
        >>> multiclass_f1_score(preds, target, num_classes=3)
        tensor(0.7778)
        >>> multiclass_f1_score(preds, target, num_classes=3, average=None)
        tensor([0.6667, 0.6667, 1.0000])

    Example (preds is float tensor):
        >>> from torchmetrics.functional.classification import multiclass_f1_score
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([
        ...     [0.16, 0.26, 0.58],
        ...     [0.22, 0.61, 0.17],
        ...     [0.71, 0.09, 0.20],
        ...     [0.05, 0.82, 0.13],
        ... ])
        >>> multiclass_f1_score(preds, target, num_classes=3)
        tensor(0.7778)
        >>> multiclass_f1_score(preds, target, num_classes=3, average=None)
        tensor([0.6667, 0.6667, 1.0000])

    Example (multidim tensors):
        >>> from torchmetrics.functional.classification import multiclass_f1_score
        >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
        >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
        >>> multiclass_f1_score(preds, target, num_classes=3, multidim_average='samplewise')
        tensor([0.4333, 0.2667])
        >>> multiclass_f1_score(preds, target, num_classes=3, multidim_average='samplewise', average=None)
        tensor([[0.8000, 0.0000, 0.5000],
                [0.0000, 0.4000, 0.4000]])
    """
    # Delegate to the F-beta implementation with equal precision/recall weighting.
    return multiclass_fbeta_score(
        preds,
        target,
        1.0,
        num_classes,
        average=average,
        top_k=top_k,
        multidim_average=multidim_average,
        ignore_index=ignore_index,
        validate_args=validate_args,
    )
572
+
573
+
574
def multilabel_f1_score(
    preds: Tensor,
    target: Tensor,
    num_labels: int,
    threshold: float = 0.5,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes F-1 score for multilabel tasks:

    .. math::
        F_{1} = 2\frac{\text{precision} * \text{recall}}{(\text{precision}) + \text{recall}}

    Equivalent to :func:`multilabel_fbeta_score` with ``beta=1.0``. Accepts an int or float
    ``preds`` tensor of shape ``(N, C, ...)`` (float values outside the [0,1] range are treated
    as logits: sigmoid is applied per element, then the result is thresholded with ``threshold``)
    and an int ``target`` tensor of shape ``(N, C, ...)``.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_labels: Integer specifing the number of labels
        threshold: Threshold for transforming probability to binary (0,1) predictions
        average: Reduction over labels: ``micro`` (sum statistics over all labels), ``macro``
            (unweighted mean over labels), ``weighted`` (support-weighted mean) or
            ``"none"``/``None`` (no reduction)
        multidim_average: ``global`` flattens extra dimensions into the batch dimension;
            ``samplewise`` computes one statistic per sample along ``N`` over the extra dimensions
        ignore_index: Target value that is ignored in the metric computation
        validate_args: If ``True``, validate arguments and tensors (set ``False`` for speed)

    Returns:
        A scalar tensor for ``average`` in ``micro``/``macro``/``weighted`` with
        ``multidim_average='global'``; shape ``(C,)`` for ``average=None/'none'``. With
        ``multidim_average='samplewise'`` the shapes are ``(N,)`` and ``(N, C)`` respectively.

    Example (preds is int tensor):
        >>> from torchmetrics.functional.classification import multilabel_f1_score
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
        >>> multilabel_f1_score(preds, target, num_labels=3)
        tensor(0.5556)
        >>> multilabel_f1_score(preds, target, num_labels=3, average=None)
        tensor([1.0000, 0.0000, 0.6667])

    Example (preds is float tensor):
        >>> from torchmetrics.functional.classification import multilabel_f1_score
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
        >>> multilabel_f1_score(preds, target, num_labels=3)
        tensor(0.5556)
        >>> multilabel_f1_score(preds, target, num_labels=3, average=None)
        tensor([1.0000, 0.0000, 0.6667])

    Example (multidim tensors):
        >>> from torchmetrics.functional.classification import multilabel_f1_score
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> multilabel_f1_score(preds, target, num_labels=3, multidim_average='samplewise')
        tensor([0.4444, 0.0000])
        >>> multilabel_f1_score(preds, target, num_labels=3, multidim_average='samplewise', average=None)
        tensor([[0.6667, 0.6667, 0.0000],
                [0.0000, 0.0000, 0.0000]])
    """
    # Delegate to the F-beta implementation with equal precision/recall weighting.
    return multilabel_fbeta_score(
        preds,
        target,
        1.0,
        num_labels,
        threshold=threshold,
        average=average,
        multidim_average=multidim_average,
        ignore_index=ignore_index,
        validate_args=validate_args,
    )
678
+
679
+
680
def fbeta_score(
    preds: Tensor,
    target: Tensor,
    task: Literal["binary", "multiclass", "multilabel"],
    beta: float = 1.0,
    threshold: float = 0.5,
    num_classes: Optional[int] = None,
    num_labels: Optional[int] = None,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
    multidim_average: Optional[Literal["global", "samplewise"]] = "global",
    top_k: Optional[int] = 1,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes `F-score`_ metric:

    .. math::
        F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
        {(\beta^2 * \text{precision}) + \text{recall}}

    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
    :func:`binary_fbeta_score`, :func:`multiclass_fbeta_score` and :func:`multilabel_fbeta_score` for the specific
    details of each argument influence and examples.

    Raises:
        ValueError: If ``task`` is not one of the supported values, if ``multidim_average`` is
            ``None``, or if ``num_classes``/``top_k`` (for ``'multiclass'``) or ``num_labels``
            (for ``'multilabel'``) are missing or not integers.

    Legacy Example:
        >>> target = torch.tensor([0, 1, 2, 0, 1, 2])
        >>> preds = torch.tensor([0, 2, 1, 0, 0, 1])
        >>> fbeta_score(preds, target, task="multiclass", num_classes=3, beta=0.5)
        tensor(0.3333)
    """
    # Validate with explicit ``raise`` instead of ``assert``: assertions are stripped when
    # Python runs with the ``-O`` flag, which would silently skip these checks.
    if multidim_average is None:
        raise ValueError(
            "Expected argument `multidim_average` to be one of ('global', 'samplewise'), but got None"
        )
    if task == "binary":
        return binary_fbeta_score(preds, target, beta, threshold, multidim_average, ignore_index, validate_args)
    if task == "multiclass":
        if not isinstance(num_classes, int):
            raise ValueError(f"Expected argument `num_classes` to be an integer, but got {num_classes}")
        if not isinstance(top_k, int):
            raise ValueError(f"Expected argument `top_k` to be an integer, but got {top_k}")
        return multiclass_fbeta_score(
            preds, target, beta, num_classes, average, top_k, multidim_average, ignore_index, validate_args
        )
    if task == "multilabel":
        if not isinstance(num_labels, int):
            raise ValueError(f"Expected argument `num_labels` to be an integer, but got {num_labels}")
        return multilabel_fbeta_score(
            preds, target, beta, num_labels, threshold, average, multidim_average, ignore_index, validate_args
        )
    raise ValueError(
        f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
    )
728
+
729
+
730
def f1_score(
    preds: Tensor,
    target: Tensor,
    task: Literal["binary", "multiclass", "multilabel"],
    threshold: float = 0.5,
    num_classes: Optional[int] = None,
    num_labels: Optional[int] = None,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
    multidim_average: Optional[Literal["global", "samplewise"]] = "global",
    top_k: Optional[int] = 1,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes F-1 score:

    .. math::
        F_{1} = 2\frac{\text{precision} * \text{recall}}{(\text{precision}) + \text{recall}}

    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
    :func:`binary_f1_score`, :func:`multiclass_f1_score` and :func:`multilabel_f1_score` for the specific
    details of each argument influence and examples.

    Raises:
        ValueError: If ``task`` is not one of the supported values, if ``multidim_average`` is
            ``None``, or if ``num_classes``/``top_k`` (for ``'multiclass'``) or ``num_labels``
            (for ``'multilabel'``) are missing or not integers.

    Legacy Example:
        >>> target = torch.tensor([0, 1, 2, 0, 1, 2])
        >>> preds = torch.tensor([0, 2, 1, 0, 0, 1])
        >>> f1_score(preds, target, task="multiclass", num_classes=3)
        tensor(0.3333)
    """
    # Validate with explicit ``raise`` instead of ``assert``: assertions are stripped when
    # Python runs with the ``-O`` flag, which would silently skip these checks.
    if multidim_average is None:
        raise ValueError(
            "Expected argument `multidim_average` to be one of ('global', 'samplewise'), but got None"
        )
    if task == "binary":
        return binary_f1_score(preds, target, threshold, multidim_average, ignore_index, validate_args)
    if task == "multiclass":
        if not isinstance(num_classes, int):
            raise ValueError(f"Expected argument `num_classes` to be an integer, but got {num_classes}")
        if not isinstance(top_k, int):
            raise ValueError(f"Expected argument `top_k` to be an integer, but got {top_k}")
        return multiclass_f1_score(
            preds, target, num_classes, average, top_k, multidim_average, ignore_index, validate_args
        )
    if task == "multilabel":
        if not isinstance(num_labels, int):
            raise ValueError(f"Expected argument `num_labels` to be an integer, but got {num_labels}")
        return multilabel_f1_score(
            preds, target, num_labels, threshold, average, multidim_average, ignore_index, validate_args
        )
    raise ValueError(
        f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
    )
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/hinge.py ADDED
@@ -0,0 +1,282 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Optional, Tuple
15
+
16
+ import torch
17
+ from torch import Tensor, tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.functional.classification.confusion_matrix import (
21
+ _binary_confusion_matrix_format,
22
+ _binary_confusion_matrix_tensor_validation,
23
+ _multiclass_confusion_matrix_format,
24
+ _multiclass_confusion_matrix_tensor_validation,
25
+ )
26
+ from torchmetrics.utilities.data import to_onehot
27
+
28
+
29
+ def _hinge_loss_compute(measure: Tensor, total: Tensor) -> Tensor:
30
+ return measure / total
31
+
32
+
33
+ def _binary_hinge_loss_arg_validation(squared: bool, ignore_index: Optional[int] = None) -> None:
34
+ if not isinstance(squared, bool):
35
+ raise ValueError(f"Expected argument `squared` to be an bool but got {squared}")
36
+ if ignore_index is not None and not isinstance(ignore_index, int):
37
+ raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
38
+
39
+
40
def _binary_hinge_loss_tensor_validation(preds: Tensor, target: Tensor, ignore_index: Optional[int] = None) -> None:
    """Validate tensors for binary hinge loss.

    Runs the shared binary confusion-matrix tensor checks and additionally requires ``preds``
    to be a floating point tensor (probabilities or logits).
    """
    _binary_confusion_matrix_tensor_validation(preds, target, ignore_index)
    if preds.is_floating_point():
        return
    raise ValueError(
        "Expected argument `preds` to be floating tensor with probabilities/logits"
        f" but got tensor with dtype {preds.dtype}"
    )
47
+
48
+
49
+ def _binary_hinge_loss_update(
50
+ preds: Tensor,
51
+ target: Tensor,
52
+ squared: bool,
53
+ ) -> Tuple[Tensor, Tensor]:
54
+
55
+ target = target.bool()
56
+ margin = torch.zeros_like(preds)
57
+ margin[target] = preds[target]
58
+ margin[~target] = -preds[~target]
59
+
60
+ measures = 1 - margin
61
+ measures = torch.clamp(measures, 0)
62
+
63
+ if squared:
64
+ measures = measures.pow(2)
65
+
66
+ total = tensor(target.shape[0], device=target.device)
67
+ return measures.sum(dim=0), total
68
+
69
+
70
def binary_hinge_loss(
    preds: Tensor,
    target: Tensor,
    squared: bool = False,
    ignore_index: Optional[int] = None,
    validate_args: bool = False,
) -> Tensor:
    r"""Computes the mean `Hinge loss`_ typically used for Support Vector Machines (SVMs) for binary tasks. It is
    defined as:

    .. math::
        \text{Hinge loss} = \max(0, 1 - y \times \hat{y})

    Where :math:`y \in {-1, 1}` is the target, and :math:`\hat{y} \in \mathbb{R}` is the prediction.

    ``preds`` is a float tensor of shape ``(N, ...)`` containing probabilities or logits (values
    outside the [0,1] range are treated as logits and sigmoid is applied per element); ``target``
    is an int tensor of shape ``(N, ...)`` with {0,1} labels (except if ``ignore_index`` is
    specified), where 1 always encodes the positive class. Additional dimensions ``...`` are
    flattened into the batch dimension.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        squared: If ``True``, compute the squared hinge loss instead of the regular one
        ignore_index: Target value that is ignored in the metric computation
        validate_args: If ``True``, validate arguments and tensors (set ``False`` for speed)

    Example:
        >>> from torchmetrics.functional.classification import binary_hinge_loss
        >>> preds = torch.tensor([0.25, 0.25, 0.55, 0.75, 0.75])
        >>> target = torch.tensor([0, 0, 1, 1, 1])
        >>> binary_hinge_loss(preds, target)
        tensor(0.6900)
        >>> binary_hinge_loss(preds, target, squared=True)
        tensor(0.6905)
    """
    if validate_args:
        _binary_hinge_loss_arg_validation(squared, ignore_index)
        _binary_hinge_loss_tensor_validation(preds, target, ignore_index)
    # threshold=0.0 / convert_to_labels=False keep the raw scores needed for the margins below.
    formatted_preds, formatted_target = _binary_confusion_matrix_format(
        preds, target, threshold=0.0, ignore_index=ignore_index, convert_to_labels=False
    )
    summed_measures, num_obs = _binary_hinge_loss_update(formatted_preds, formatted_target, squared)
    return _hinge_loss_compute(summed_measures, num_obs)
122
+
123
+
124
def _multiclass_hinge_loss_arg_validation(
    num_classes: int,
    squared: bool = False,
    multiclass_mode: Literal["crammer-singer", "one-vs-all"] = "crammer-singer",
    ignore_index: Optional[int] = None,
) -> None:
    """Validate the arguments of ``multiclass_hinge_loss``.

    Reuses the binary checks for ``squared``/``ignore_index``, then verifies ``num_classes``
    and ``multiclass_mode``.
    """
    _binary_hinge_loss_arg_validation(squared, ignore_index)
    num_classes_ok = isinstance(num_classes, int) and num_classes >= 2
    if not num_classes_ok:
        raise ValueError(f"Expected argument `num_classes` to be an integer larger than 1, but got {num_classes}")
    allowed_mm = ("crammer-singer", "one-vs-all")
    if multiclass_mode not in allowed_mm:
        raise ValueError(f"Expected argument `multiclass_mode` to be one of {allowed_mm}, but got {multiclass_mode}.")
136
+
137
+
138
def _multiclass_hinge_loss_tensor_validation(
    preds: Tensor, target: Tensor, num_classes: int, ignore_index: Optional[int] = None
) -> None:
    """Validate tensors for multiclass hinge loss.

    Runs the shared multiclass confusion-matrix tensor checks and additionally requires
    ``preds`` to be a floating point tensor (probabilities or logits).
    """
    _multiclass_confusion_matrix_tensor_validation(preds, target, num_classes, ignore_index)
    if preds.is_floating_point():
        return
    raise ValueError(
        "Expected argument `preds` to be floating tensor with probabilities/logits"
        f" but got tensor with dtype {preds.dtype}"
    )
147
+
148
+
149
def _multiclass_hinge_loss_update(
    preds: Tensor,
    target: Tensor,
    squared: bool,
    multiclass_mode: Literal["crammer-singer", "one-vs-all"] = "crammer-singer",
) -> Tuple[Tensor, Tensor]:
    """Accumulate hinge measures for multiclass inputs.

    Returns a tuple of (summed hinge measures, number of observations). ``preds`` with values
    outside the [0,1] range are treated as logits and softmaxed along the class dimension.
    """
    if not torch.all((preds >= 0) & (preds <= 1)):
        preds = preds.softmax(1)

    target = to_onehot(target, max(2, preds.shape[1])).bool()
    if multiclass_mode == "crammer-singer":
        # margin of the true-class score over the best competing class
        best_other = preds[~target].view(preds.shape[0], -1).max(dim=1).values
        margin = preds[target] - best_other
    else:
        # one-vs-all: signed score per class, positive for the true class
        target = target.bool()
        margin = torch.where(target, preds, -preds)

    measures = (1 - margin).clamp(min=0)
    if squared:
        measures = measures * measures

    total = tensor(target.shape[0], device=target.device)
    return measures.sum(dim=0), total
176
+
177
+
178
def multiclass_hinge_loss(
    preds: Tensor,
    target: Tensor,
    num_classes: int,
    squared: bool = False,
    multiclass_mode: Literal["crammer-singer", "one-vs-all"] = "crammer-singer",
    ignore_index: Optional[int] = None,
    validate_args: bool = False,
) -> Tensor:
    r"""Computes the mean `Hinge loss`_ typically used for Support Vector Machines (SVMs) for multiclass tasks.

    With the Crammer & Singer definition:

    .. math::
        \text{Hinge loss} = \max\left(0, 1 - \hat{y}_y + \max_{i \ne y} (\hat{y}_i)\right)

    where :math:`y \in {0, ..., \mathrm{C}}` is the target class (:math:`\mathrm{C}` being the
    number of classes) and :math:`\hat{y} \in \mathbb{R}^\mathrm{C}` is the predicted output per
    class. With ``multiclass_mode='one-vs-all'`` each class is instead scored against all other
    classes in a binary fashion.

    ``preds`` is a float tensor of shape ``(N, C, ...)`` with probabilities or logits (values
    outside the [0,1] range are treated as logits and softmax is applied per sample); ``target``
    is an int tensor of shape ``(N, ...)`` with values in the ``[0, n_classes-1]`` range (except
    if ``ignore_index`` is specified). Additional dimensions ``...`` are flattened into the batch
    dimension.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_classes: Integer specifing the number of classes
        squared: If ``True``, compute the squared hinge loss instead of the regular one
        multiclass_mode: Determines how to compute the metric
        ignore_index: Target value that is ignored in the metric computation
        validate_args: If ``True``, validate arguments and tensors (set ``False`` for speed)

    Example:
        >>> from torchmetrics.functional.classification import multiclass_hinge_loss
        >>> preds = torch.tensor([[0.25, 0.20, 0.55],
        ...                       [0.55, 0.05, 0.40],
        ...                       [0.10, 0.30, 0.60],
        ...                       [0.90, 0.05, 0.05]])
        >>> target = torch.tensor([0, 1, 2, 0])
        >>> multiclass_hinge_loss(preds, target, num_classes=3)
        tensor(0.9125)
        >>> multiclass_hinge_loss(preds, target, num_classes=3, squared=True)
        tensor(1.1131)
        >>> multiclass_hinge_loss(preds, target, num_classes=3, multiclass_mode='one-vs-all')
        tensor([0.8750, 1.1250, 1.1000])
    """
    if validate_args:
        _multiclass_hinge_loss_arg_validation(num_classes, squared, multiclass_mode, ignore_index)
        _multiclass_hinge_loss_tensor_validation(preds, target, num_classes, ignore_index)
    # convert_to_labels=False keeps the per-class scores needed for the margins below.
    formatted_preds, formatted_target = _multiclass_confusion_matrix_format(
        preds, target, ignore_index, convert_to_labels=False
    )
    summed_measures, num_obs = _multiclass_hinge_loss_update(
        formatted_preds, formatted_target, squared, multiclass_mode
    )
    return _hinge_loss_compute(summed_measures, num_obs)
241
+
242
+
243
def hinge_loss(
    preds: Tensor,
    target: Tensor,
    task: Literal["binary", "multiclass"],
    num_classes: Optional[int] = None,
    squared: bool = False,
    multiclass_mode: Literal["crammer-singer", "one-vs-all"] = "crammer-singer",
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes the mean `Hinge loss`_ typically used for Support Vector Machines (SVMs).

    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'`` or ``'multiclass'``. See the documentation of
    :func:`binary_hinge_loss` and :func:`multiclass_hinge_loss` for the specific details of
    each argument influence and examples.

    Raises:
        ValueError: If ``task`` is not ``'binary'`` or ``'multiclass'``, or if ``num_classes``
            is missing or not an integer for the ``'multiclass'`` task.

    Legacy Example:
        >>> import torch
        >>> target = torch.tensor([0, 1, 1])
        >>> preds = torch.tensor([0.5, 0.7, 0.1])
        >>> hinge_loss(preds, target, task="binary")
        tensor(0.9000)

        >>> target = torch.tensor([0, 1, 2])
        >>> preds = torch.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
        >>> hinge_loss(preds, target, task="multiclass", num_classes=3)
        tensor(1.5551)

        >>> target = torch.tensor([0, 1, 2])
        >>> preds = torch.tensor([[-1.0, 0.9, 0.2], [0.5, -1.1, 0.8], [2.2, -0.5, 0.3]])
        >>> hinge_loss(preds, target, task="multiclass", num_classes=3, multiclass_mode="one-vs-all")
        tensor([1.3743, 1.1945, 1.2359])
    """
    if task == "binary":
        return binary_hinge_loss(preds, target, squared, ignore_index, validate_args)
    if task == "multiclass":
        # Explicit raise instead of assert: assertions are stripped under ``python -O``.
        if not isinstance(num_classes, int):
            raise ValueError(f"Expected argument `num_classes` to be an integer, but got {num_classes}")
        return multiclass_hinge_loss(preds, target, num_classes, squared, multiclass_mode, ignore_index, validate_args)
    # Fix: the valid tasks are 'binary' and 'multiclass' (the old message wrongly said 'multilabel').
    raise ValueError(f"Expected argument `task` to either be `'binary'` or `'multiclass'` but got {task}")
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/matthews_corrcoef.py ADDED
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Optional
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.functional.classification.confusion_matrix import (
21
+ _binary_confusion_matrix_arg_validation,
22
+ _binary_confusion_matrix_format,
23
+ _binary_confusion_matrix_tensor_validation,
24
+ _binary_confusion_matrix_update,
25
+ _multiclass_confusion_matrix_arg_validation,
26
+ _multiclass_confusion_matrix_format,
27
+ _multiclass_confusion_matrix_tensor_validation,
28
+ _multiclass_confusion_matrix_update,
29
+ _multilabel_confusion_matrix_arg_validation,
30
+ _multilabel_confusion_matrix_format,
31
+ _multilabel_confusion_matrix_tensor_validation,
32
+ _multilabel_confusion_matrix_update,
33
+ )
34
+
35
+
36
+ def _matthews_corrcoef_reduce(confmat: Tensor) -> Tensor:
37
+ """Reduce an un-normalized confusion matrix of shape (n_classes, n_classes) into the matthews corrcoef
38
+ score."""
39
+ # convert multilabel into binary
40
+ confmat = confmat.sum(0) if confmat.ndim == 3 else confmat
41
+
42
+ tk = confmat.sum(dim=-1).float()
43
+ pk = confmat.sum(dim=-2).float()
44
+ c = torch.trace(confmat).float()
45
+ s = confmat.sum().float()
46
+
47
+ cov_ytyp = c * s - sum(tk * pk)
48
+ cov_ypyp = s**2 - sum(pk * pk)
49
+ cov_ytyt = s**2 - sum(tk * tk)
50
+
51
+ denom = cov_ypyp * cov_ytyt
52
+ if denom == 0:
53
+ return torch.tensor(0, dtype=confmat.dtype, device=confmat.device)
54
+ else:
55
+ return cov_ytyp / torch.sqrt(denom)
56
+
57
+
58
def binary_matthews_corrcoef(
    preds: Tensor,
    target: Tensor,
    threshold: float = 0.5,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Calculates `Matthews correlation coefficient`_ for binary tasks. This metric measures the general
    correlation or quality of a classification.

    Accepts the following input tensors:

    - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
      [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
      we convert to int tensor with thresholding using the value in ``threshold``.
    - ``target`` (int tensor): ``(N, ...)``

    Additional dimension ``...`` will be flattened into the batch dimension.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        threshold: Threshold for transforming probability to binary (0,1) predictions
        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (preds is int tensor):
        >>> from torchmetrics.functional.classification import binary_matthews_corrcoef
        >>> target = torch.tensor([1, 1, 0, 0])
        >>> preds = torch.tensor([0, 1, 0, 0])
        >>> binary_matthews_corrcoef(preds, target)
        tensor(0.5774)

    Example (preds is float tensor):
        >>> from torchmetrics.functional.classification import binary_matthews_corrcoef
        >>> target = torch.tensor([1, 1, 0, 0])
        >>> preds = torch.tensor([0.35, 0.85, 0.48, 0.01])
        >>> binary_matthews_corrcoef(preds, target)
        tensor(0.5774)
    """
    if validate_args:
        # same validation as the binary confusion matrix, with normalization disabled
        _binary_confusion_matrix_arg_validation(threshold, ignore_index, normalize=None)
        _binary_confusion_matrix_tensor_validation(preds, target, ignore_index)
    preds, target = _binary_confusion_matrix_format(preds, target, threshold, ignore_index)
    confmat = _binary_confusion_matrix_update(preds, target)
    return _matthews_corrcoef_reduce(confmat)
105
+
106
+
107
def multiclass_matthews_corrcoef(
    preds: Tensor,
    target: Tensor,
    num_classes: int,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Calculates `Matthews correlation coefficient`_ for multiclass tasks. This metric measures the general
    correlation or quality of a classification.

    Accepts the following input tensors:

    - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point
      we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
      an int tensor.
    - ``target`` (int tensor): ``(N, ...)``

    Additional dimension ``...`` will be flattened into the batch dimension.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_classes: Integer specifying the number of classes
        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (pred is integer tensor):
        >>> from torchmetrics.functional.classification import multiclass_matthews_corrcoef
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([2, 1, 0, 1])
        >>> multiclass_matthews_corrcoef(preds, target, num_classes=3)
        tensor(0.7000)

    Example (pred is float tensor):
        >>> from torchmetrics.functional.classification import multiclass_matthews_corrcoef
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([
        ...   [0.16, 0.26, 0.58],
        ...   [0.22, 0.61, 0.17],
        ...   [0.71, 0.09, 0.20],
        ...   [0.05, 0.82, 0.13],
        ... ])
        >>> multiclass_matthews_corrcoef(preds, target, num_classes=3)
        tensor(0.7000)
    """
    if validate_args:
        # same validation as the multiclass confusion matrix, with normalization disabled
        _multiclass_confusion_matrix_arg_validation(num_classes, ignore_index, normalize=None)
        _multiclass_confusion_matrix_tensor_validation(preds, target, num_classes, ignore_index)
    preds, target = _multiclass_confusion_matrix_format(preds, target, ignore_index)
    confmat = _multiclass_confusion_matrix_update(preds, target, num_classes)
    return _matthews_corrcoef_reduce(confmat)
159
+
160
+
161
def multilabel_matthews_corrcoef(
    preds: Tensor,
    target: Tensor,
    num_labels: int,
    threshold: float = 0.5,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Calculates `Matthews correlation coefficient`_ for multilabel tasks. This metric measures the general
    correlation or quality of a classification.

    Accepts the following input tensors:

    - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
      [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
      we convert to int tensor with thresholding using the value in ``threshold``.
    - ``target`` (int tensor): ``(N, C, ...)``

    Additional dimension ``...`` will be flattened into the batch dimension.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_labels: Integer specifying the number of labels
        threshold: Threshold for transforming probability to binary (0,1) predictions
        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (preds is int tensor):
        >>> from torchmetrics.functional.classification import multilabel_matthews_corrcoef
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
        >>> multilabel_matthews_corrcoef(preds, target, num_labels=3)
        tensor(0.3333)

    Example (preds is float tensor):
        >>> from torchmetrics.functional.classification import multilabel_matthews_corrcoef
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
        >>> multilabel_matthews_corrcoef(preds, target, num_labels=3)
        tensor(0.3333)
    """
    if validate_args:
        # same validation as the multilabel confusion matrix, with normalization disabled
        _multilabel_confusion_matrix_arg_validation(num_labels, threshold, ignore_index, normalize=None)
        _multilabel_confusion_matrix_tensor_validation(preds, target, num_labels, ignore_index)
    preds, target = _multilabel_confusion_matrix_format(preds, target, num_labels, threshold, ignore_index)
    confmat = _multilabel_confusion_matrix_update(preds, target, num_labels)
    return _matthews_corrcoef_reduce(confmat)
210
+
211
+
212
def matthews_corrcoef(
    preds: Tensor,
    target: Tensor,
    task: Optional[Literal["binary", "multiclass", "multilabel"]] = None,
    threshold: float = 0.5,
    num_classes: Optional[int] = None,
    num_labels: Optional[int] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Calculates `Matthews correlation coefficient`_ . This metric measures the general correlation or quality of
    a classification.

    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
    :func:`binary_matthews_corrcoef`, :func:`multiclass_matthews_corrcoef` and :func:`multilabel_matthews_corrcoef` for
    the specific details of each argument influence and examples.

    Raises:
        ValueError: If ``task`` is not one of ``'binary'``, ``'multiclass'`` or ``'multilabel'``.

    Legacy Example:
        >>> target = torch.tensor([1, 1, 0, 0])
        >>> preds = torch.tensor([0, 1, 0, 0])
        >>> matthews_corrcoef(preds, target, task="multiclass", num_classes=2)
        tensor(0.5774)
    """
    # NOTE: ``task`` is annotated as Optional because ``None`` (the default) is not a valid Literal
    # member; passing no task falls through to the ValueError below.
    if task == "binary":
        return binary_matthews_corrcoef(preds, target, threshold, ignore_index, validate_args)
    if task == "multiclass":
        assert isinstance(num_classes, int), "`num_classes` must be an integer when `task='multiclass'`"
        return multiclass_matthews_corrcoef(preds, target, num_classes, ignore_index, validate_args)
    if task == "multilabel":
        assert isinstance(num_labels, int), "`num_labels` must be an integer when `task='multilabel'`"
        return multilabel_matthews_corrcoef(preds, target, num_labels, threshold, ignore_index, validate_args)
    raise ValueError(
        f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
    )
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/precision_recall.py ADDED
@@ -0,0 +1,738 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Optional
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.functional.classification.stat_scores import (
21
+ _binary_stat_scores_arg_validation,
22
+ _binary_stat_scores_format,
23
+ _binary_stat_scores_tensor_validation,
24
+ _binary_stat_scores_update,
25
+ _multiclass_stat_scores_arg_validation,
26
+ _multiclass_stat_scores_format,
27
+ _multiclass_stat_scores_tensor_validation,
28
+ _multiclass_stat_scores_update,
29
+ _multilabel_stat_scores_arg_validation,
30
+ _multilabel_stat_scores_format,
31
+ _multilabel_stat_scores_tensor_validation,
32
+ _multilabel_stat_scores_update,
33
+ )
34
+ from torchmetrics.utilities.compute import _safe_divide
35
+
36
+
37
def _precision_recall_reduce(
    stat: Literal["precision", "recall"],
    tp: Tensor,
    fp: Tensor,
    tn: Tensor,
    fn: Tensor,
    average: Optional[Literal["binary", "micro", "macro", "weighted", "none"]],
    multidim_average: Literal["global", "samplewise"] = "global",
) -> Tensor:
    """Reduce true/false positive/negative counts into a precision or recall score.

    Precision divides ``tp`` by ``tp + fp`` while recall divides ``tp`` by ``tp + fn``;
    every other part of the reduction is shared between the two scores.
    """
    # the single point where precision and recall differ: which error count joins tp in the denominator
    error_count = fp if stat == "precision" else fn
    if average == "binary":
        return _safe_divide(tp, tp + error_count)
    if average == "micro":
        reduce_dim = 0 if multidim_average == "global" else 1
        tp = tp.sum(dim=reduce_dim)
        fn = fn.sum(dim=reduce_dim)
        error_count = error_count.sum(dim=reduce_dim)
        return _safe_divide(tp, tp + error_count)

    score = _safe_divide(tp, tp + error_count)
    if average is None or average == "none":
        return score
    # "weighted" weighs each class by its support; "macro" weighs all classes equally
    weights = tp + fn if average == "weighted" else torch.ones_like(score)
    return _safe_divide(weights * score, weights.sum(-1, keepdim=True)).sum(-1)
63
+
64
+
65
def binary_precision(
    preds: Tensor,
    target: Tensor,
    threshold: float = 0.5,
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes `Precision`_ for binary tasks:

    .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}

    Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
    false positives respectively.

    Accepts the following input tensors:

    - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
      [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
      we convert to int tensor with thresholding using the value in ``threshold``.
    - ``target`` (int tensor): ``(N, ...)``

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        threshold: Threshold for transforming probability to binary {0,1} predictions
        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        If ``multidim_average`` is set to ``global``, the metric returns a scalar value. If ``multidim_average``
        is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample.

    Example (preds is int tensor):
        >>> from torchmetrics.functional.classification import binary_precision
        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
        >>> preds = torch.tensor([0, 0, 1, 1, 0, 1])
        >>> binary_precision(preds, target)
        tensor(0.6667)

    Example (preds is float tensor):
        >>> from torchmetrics.functional.classification import binary_precision
        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
        >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
        >>> binary_precision(preds, target)
        tensor(0.6667)

    Example (multidim tensors):
        >>> from torchmetrics.functional.classification import binary_precision
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> binary_precision(preds, target, multidim_average='samplewise')
        tensor([0.4000, 0.0000])
    """
    if validate_args:
        _binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index)
        _binary_stat_scores_tensor_validation(preds, target, multidim_average, ignore_index)
    preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index)
    tp, fp, tn, fn = _binary_stat_scores_update(preds, target, multidim_average)
    return _precision_recall_reduce("precision", tp, fp, tn, fn, average="binary", multidim_average=multidim_average)
139
+
140
+
141
def multiclass_precision(
    preds: Tensor,
    target: Tensor,
    num_classes: int,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
    top_k: int = 1,
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes `Precision`_ for multiclass tasks.

    .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}

    Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
    false positives respectively.

    Accepts the following input tensors:

    - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point
      we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
      an int tensor.
    - ``target`` (int tensor): ``(N, ...)``

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_classes: Integer specifying the number of classes
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction

        top_k:
            Number of highest probability or logit score predictions considered to find the correct label.
            Only works when ``preds`` contain probabilities/logits.
        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        The returned shape depends on the ``average`` and ``multidim_average`` arguments:

        - If ``multidim_average`` is set to ``global``:

          - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
          - If ``average=None/'none'``, the shape will be ``(C,)``

        - If ``multidim_average`` is set to ``samplewise``:

          - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
          - If ``average=None/'none'``, the shape will be ``(N, C)``

    Example (preds is int tensor):
        >>> from torchmetrics.functional.classification import multiclass_precision
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([2, 1, 0, 1])
        >>> multiclass_precision(preds, target, num_classes=3)
        tensor(0.8333)
        >>> multiclass_precision(preds, target, num_classes=3, average=None)
        tensor([1.0000, 0.5000, 1.0000])

    Example (preds is float tensor):
        >>> from torchmetrics.functional.classification import multiclass_precision
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([
        ...   [0.16, 0.26, 0.58],
        ...   [0.22, 0.61, 0.17],
        ...   [0.71, 0.09, 0.20],
        ...   [0.05, 0.82, 0.13],
        ... ])
        >>> multiclass_precision(preds, target, num_classes=3)
        tensor(0.8333)
        >>> multiclass_precision(preds, target, num_classes=3, average=None)
        tensor([1.0000, 0.5000, 1.0000])

    Example (multidim tensors):
        >>> from torchmetrics.functional.classification import multiclass_precision
        >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
        >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
        >>> multiclass_precision(preds, target, num_classes=3, multidim_average='samplewise')
        tensor([0.3889, 0.2778])
        >>> multiclass_precision(preds, target, num_classes=3, multidim_average='samplewise', average=None)
        tensor([[0.6667, 0.0000, 0.5000],
                [0.0000, 0.5000, 0.3333]])
    """
    if validate_args:
        _multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index)
        _multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index)
    preds, target = _multiclass_stat_scores_format(preds, target, top_k)
    tp, fp, tn, fn = _multiclass_stat_scores_update(
        preds, target, num_classes, top_k, average, multidim_average, ignore_index
    )
    return _precision_recall_reduce("precision", tp, fp, tn, fn, average=average, multidim_average=multidim_average)
246
+
247
+
248
def multilabel_precision(
    preds: Tensor,
    target: Tensor,
    num_labels: int,
    threshold: float = 0.5,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes `Precision`_ for multilabel tasks.

    .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}

    Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
    false positives respectively.

    Accepts the following input tensors:

    - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
      [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
      we convert to int tensor with thresholding using the value in ``threshold``.
    - ``target`` (int tensor): ``(N, C, ...)``

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_labels: Integer specifying the number of labels
        threshold: Threshold for transforming probability to binary (0,1) predictions
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction

        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        The returned shape depends on the ``average`` and ``multidim_average`` arguments:

        - If ``multidim_average`` is set to ``global``:

          - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
          - If ``average=None/'none'``, the shape will be ``(C,)``

        - If ``multidim_average`` is set to ``samplewise``:

          - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
          - If ``average=None/'none'``, the shape will be ``(N, C)``

    Example (preds is int tensor):
        >>> from torchmetrics.functional.classification import multilabel_precision
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
        >>> multilabel_precision(preds, target, num_labels=3)
        tensor(0.5000)
        >>> multilabel_precision(preds, target, num_labels=3, average=None)
        tensor([1.0000, 0.0000, 0.5000])

    Example (preds is float tensor):
        >>> from torchmetrics.functional.classification import multilabel_precision
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
        >>> multilabel_precision(preds, target, num_labels=3)
        tensor(0.5000)
        >>> multilabel_precision(preds, target, num_labels=3, average=None)
        tensor([1.0000, 0.0000, 0.5000])

    Example (multidim tensors):
        >>> from torchmetrics.functional.classification import multilabel_precision
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> multilabel_precision(preds, target, num_labels=3, multidim_average='samplewise')
        tensor([0.3333, 0.0000])
        >>> multilabel_precision(preds, target, num_labels=3, multidim_average='samplewise', average=None)
        tensor([[0.5000, 0.5000, 0.0000],
                [0.0000, 0.0000, 0.0000]])
    """
    if validate_args:
        _multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index)
        _multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index)
    preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index)
    tp, fp, tn, fn = _multilabel_stat_scores_update(preds, target, multidim_average)
    return _precision_recall_reduce("precision", tp, fp, tn, fn, average=average, multidim_average=multidim_average)
349
+
350
+
351
def binary_recall(
    preds: Tensor,
    target: Tensor,
    threshold: float = 0.5,
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes `Recall`_ for binary tasks:

    .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}

    Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
    false negatives respectively.

    Accepts the following input tensors:

    - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
      [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
      we convert to int tensor with thresholding using the value in ``threshold``.
    - ``target`` (int tensor): ``(N, ...)``

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        threshold: Threshold for transforming probability to binary {0,1} predictions
        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        If ``multidim_average`` is set to ``global``, the metric returns a scalar value. If ``multidim_average``
        is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar value per sample.

    Example (preds is int tensor):
        >>> from torchmetrics.functional.classification import binary_recall
        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
        >>> preds = torch.tensor([0, 0, 1, 1, 0, 1])
        >>> binary_recall(preds, target)
        tensor(0.6667)

    Example (preds is float tensor):
        >>> from torchmetrics.functional.classification import binary_recall
        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
        >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
        >>> binary_recall(preds, target)
        tensor(0.6667)

    Example (multidim tensors):
        >>> from torchmetrics.functional.classification import binary_recall
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> binary_recall(preds, target, multidim_average='samplewise')
        tensor([0.6667, 0.0000])
    """
    if validate_args:
        _binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index)
        _binary_stat_scores_tensor_validation(preds, target, multidim_average, ignore_index)
    preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index)
    tp, fp, tn, fn = _binary_stat_scores_update(preds, target, multidim_average)
    return _precision_recall_reduce("recall", tp, fp, tn, fn, average="binary", multidim_average=multidim_average)
425
+
426
+
427
def multiclass_recall(
    preds: Tensor,
    target: Tensor,
    num_classes: int,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
    top_k: int = 1,
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes `Recall`_ for multiclass tasks:

    .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}

    Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
    false negatives respectively.

    Accepts the following input tensors:

    - ``preds``: ``(N, ...)`` (int tensor) or ``(N, C, ..)`` (float tensor). If preds is a floating point
      we apply ``torch.argmax`` along the ``C`` dimension to automatically convert probabilities/logits into
      an int tensor.
    - ``target`` (int tensor): ``(N, ...)``

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_classes: Integer specifying the number of classes
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction

        top_k:
            Number of highest probability or logit score predictions considered to find the correct label.
            Only works when ``preds`` contain probabilities/logits.
        multidim_average:
            Defines how additionally dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flatted along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        The returned shape depends on the ``average`` and ``multidim_average`` arguments:

        - If ``multidim_average`` is set to ``global``:

          - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
          - If ``average=None/'none'``, the shape will be ``(C,)``

        - If ``multidim_average`` is set to ``samplewise``:

          - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
          - If ``average=None/'none'``, the shape will be ``(N, C)``

    Example (preds is int tensor):
        >>> from torchmetrics.functional.classification import multiclass_recall
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([2, 1, 0, 1])
        >>> multiclass_recall(preds, target, num_classes=3)
        tensor(0.8333)
        >>> multiclass_recall(preds, target, num_classes=3, average=None)
        tensor([0.5000, 1.0000, 1.0000])

    Example (preds is float tensor):
        >>> from torchmetrics.functional.classification import multiclass_recall
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([
        ...   [0.16, 0.26, 0.58],
        ...   [0.22, 0.61, 0.17],
        ...   [0.71, 0.09, 0.20],
        ...   [0.05, 0.82, 0.13],
        ... ])
        >>> multiclass_recall(preds, target, num_classes=3)
        tensor(0.8333)
        >>> multiclass_recall(preds, target, num_classes=3, average=None)
        tensor([0.5000, 1.0000, 1.0000])

    Example (multidim tensors):
        >>> from torchmetrics.functional.classification import multiclass_recall
        >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
        >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
        >>> multiclass_recall(preds, target, num_classes=3, multidim_average='samplewise')
        tensor([0.5000, 0.2778])
        >>> multiclass_recall(preds, target, num_classes=3, multidim_average='samplewise', average=None)
        tensor([[1.0000, 0.0000, 0.5000],
                [0.0000, 0.3333, 0.5000]])
    """
    # Optionally validate args/tensors first, then reuse the shared stat-score pipeline
    # (format -> update -> reduce) so recall stays consistent with precision/f-score.
    if validate_args:
        _multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index)
        _multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index)
    preds, target = _multiclass_stat_scores_format(preds, target, top_k)
    tp, fp, tn, fn = _multiclass_stat_scores_update(
        preds, target, num_classes, top_k, average, multidim_average, ignore_index
    )
    return _precision_recall_reduce("recall", tp, fp, tn, fn, average=average, multidim_average=multidim_average)
532
+
533
+
534
def multilabel_recall(
    preds: Tensor,
    target: Tensor,
    num_labels: int,
    threshold: float = 0.5,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes `Recall`_ for multilabel tasks:

    .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}

    Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
    false negatives respectively.

    Accepts the following input tensors:

    - ``preds`` (int or float tensor): ``(N, C, ...)``. If preds is a floating point tensor with values outside
      [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Additionally,
      we convert to int tensor with thresholding using the value in ``threshold``.
    - ``target`` (int tensor): ``(N, C, ...)``

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_labels: Integer specifying the number of labels
        threshold: Threshold for transforming probability to binary {0,1} predictions
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction

        multidim_average:
            Defines how additionally dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flatted along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        The returned shape depends on the ``average`` and ``multidim_average`` arguments:

        - If ``multidim_average`` is set to ``global``:

          - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
          - If ``average=None/'none'``, the shape will be ``(C,)``

        - If ``multidim_average`` is set to ``samplewise``:

          - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
          - If ``average=None/'none'``, the shape will be ``(N, C)``

    Example (preds is int tensor):
        >>> from torchmetrics.functional.classification import multilabel_recall
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
        >>> multilabel_recall(preds, target, num_labels=3)
        tensor(0.6667)
        >>> multilabel_recall(preds, target, num_labels=3, average=None)
        tensor([1., 0., 1.])

    Example (preds is float tensor):
        >>> from torchmetrics.functional.classification import multilabel_recall
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
        >>> multilabel_recall(preds, target, num_labels=3)
        tensor(0.6667)
        >>> multilabel_recall(preds, target, num_labels=3, average=None)
        tensor([1., 0., 1.])

    Example (multidim tensors):
        >>> from torchmetrics.functional.classification import multilabel_recall
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> multilabel_recall(preds, target, num_labels=3, multidim_average='samplewise')
        tensor([0.6667, 0.0000])
        >>> multilabel_recall(preds, target, num_labels=3, multidim_average='samplewise', average=None)
        tensor([[1., 1., 0.],
                [0., 0., 0.]])
    """
    # Optionally validate args/tensors first, then reuse the shared stat-score pipeline
    # (format -> update -> reduce) so recall stays consistent with precision/f-score.
    if validate_args:
        _multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index)
        _multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index)
    preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index)
    tp, fp, tn, fn = _multilabel_stat_scores_update(preds, target, multidim_average)
    return _precision_recall_reduce("recall", tp, fp, tn, fn, average=average, multidim_average=multidim_average)
635
+
636
+
637
def precision(
    preds: Tensor,
    target: Tensor,
    task: Literal["binary", "multiclass", "multilabel"],
    threshold: float = 0.5,
    num_classes: Optional[int] = None,
    num_labels: Optional[int] = None,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
    multidim_average: Optional[Literal["global", "samplewise"]] = "global",
    top_k: Optional[int] = 1,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes `Precision`_:

    .. math:: \text{Precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}

    Where :math:`\text{TP}` and :math:`\text{FP}` represent the number of true positives and
    false positives respectively.

    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
    :func:`binary_precision`, :func:`multiclass_precision` and :func:`multilabel_precision` for the specific details of
    each argument influence and examples.

    Raises:
        ValueError: If ``multidim_average`` is ``None``, if ``task`` is not one of the supported values, or if a
            required task-specific argument (``num_classes``, ``top_k``, ``num_labels``) is not an integer.

    Legacy Example:
        >>> preds = torch.tensor([2, 0, 2, 1])
        >>> target = torch.tensor([1, 1, 2, 0])
        >>> precision(preds, target, task="multiclass", average='macro', num_classes=3)
        tensor(0.1667)
        >>> precision(preds, target, task="multiclass", average='micro', num_classes=3)
        tensor(0.2500)
    """
    # Validate with explicit raises instead of `assert`: asserts are stripped under `python -O`,
    # which would silently skip argument checking.
    if multidim_average is None:
        raise ValueError("Expected argument `multidim_average` to be one of `global` or `samplewise`, but got `None`")
    if task == "binary":
        return binary_precision(preds, target, threshold, multidim_average, ignore_index, validate_args)
    if task == "multiclass":
        if not isinstance(num_classes, int):
            raise ValueError(f"Expected argument `num_classes` to be an integer, but got {num_classes}")
        if not isinstance(top_k, int):
            raise ValueError(f"Expected argument `top_k` to be an integer, but got {top_k}")
        return multiclass_precision(
            preds, target, num_classes, average, top_k, multidim_average, ignore_index, validate_args
        )
    if task == "multilabel":
        if not isinstance(num_labels, int):
            raise ValueError(f"Expected argument `num_labels` to be an integer, but got {num_labels}")
        return multilabel_precision(
            preds, target, num_labels, threshold, average, multidim_average, ignore_index, validate_args
        )
    raise ValueError(
        f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
    )
687
+
688
+
689
def recall(
    preds: Tensor,
    target: Tensor,
    task: Literal["binary", "multiclass", "multilabel"],
    threshold: float = 0.5,
    num_classes: Optional[int] = None,
    num_labels: Optional[int] = None,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
    multidim_average: Optional[Literal["global", "samplewise"]] = "global",
    top_k: Optional[int] = 1,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes `Recall`_:

    .. math:: \text{Recall} = \frac{\text{TP}}{\text{TP} + \text{FN}}

    Where :math:`\text{TP}` and :math:`\text{FN}` represent the number of true positives and
    false negatives respectively.

    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
    :func:`binary_recall`, :func:`multiclass_recall` and :func:`multilabel_recall` for the specific details of
    each argument influence and examples.

    Raises:
        ValueError: If ``multidim_average`` is ``None``, if ``task`` is not one of the supported values, or if a
            required task-specific argument (``num_classes``, ``top_k``, ``num_labels``) is not an integer.

    Legacy Example:
        >>> preds = torch.tensor([2, 0, 2, 1])
        >>> target = torch.tensor([1, 1, 2, 0])
        >>> recall(preds, target, task="multiclass", average='macro', num_classes=3)
        tensor(0.3333)
        >>> recall(preds, target, task="multiclass", average='micro', num_classes=3)
        tensor(0.2500)
    """
    # Validate with explicit raises instead of `assert`: asserts are stripped under `python -O`,
    # which would silently skip argument checking.
    if multidim_average is None:
        raise ValueError("Expected argument `multidim_average` to be one of `global` or `samplewise`, but got `None`")
    if task == "binary":
        return binary_recall(preds, target, threshold, multidim_average, ignore_index, validate_args)
    if task == "multiclass":
        if not isinstance(num_classes, int):
            raise ValueError(f"Expected argument `num_classes` to be an integer, but got {num_classes}")
        if not isinstance(top_k, int):
            raise ValueError(f"Expected argument `top_k` to be an integer, but got {top_k}")
        return multiclass_recall(
            preds, target, num_classes, average, top_k, multidim_average, ignore_index, validate_args
        )
    if task == "multilabel":
        if not isinstance(num_labels, int):
            raise ValueError(f"Expected argument `num_labels` to be an integer, but got {num_labels}")
        return multilabel_recall(
            preds, target, num_labels, threshold, average, multidim_average, ignore_index, validate_args
        )
    raise ValueError(
        f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
    )
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/precision_recall_curve.py ADDED
@@ -0,0 +1,834 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import List, Optional, Sequence, Tuple, Union
16
+
17
+ import torch
18
+ from torch import Tensor, tensor
19
+ from torch.nn import functional as F
20
+ from typing_extensions import Literal
21
+
22
+ from torchmetrics.utilities.checks import _check_same_shape
23
+ from torchmetrics.utilities.compute import _safe_divide
24
+ from torchmetrics.utilities.data import _bincount
25
+
26
+
27
def _binary_clf_curve(
    preds: Tensor,
    target: Tensor,
    sample_weights: Optional[Sequence] = None,
    pos_label: int = 1,
) -> Tuple[Tensor, Tensor, Tensor]:
    """Calculates the tps and false positives for all unique thresholds in the preds tensor. Adapted from
    https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/metrics/_ranking.py.

    Args:
        preds: 1d tensor with predictions
        target: 1d tensor with true values
        sample_weights: a 1d tensor with a weight per sample
        pos_label: integer determining what the positive class in target tensor is

    Returns:
        fps: 1d tensor with false positives for different thresholds
        tps: 1d tensor with true positives for different thresholds
        thresholds: the unique thresholds use for calculating fps and tps
    """
    with torch.no_grad():
        if sample_weights is not None and not isinstance(sample_weights, Tensor):
            sample_weights = tensor(sample_weights, device=preds.device, dtype=torch.float)

        # remove class dimension if necessary
        if preds.ndim > target.ndim:
            preds = preds[:, 0]
        # sort by descending score so the cumulative sums below count how many
        # positives/negatives score at or above each candidate threshold
        desc_score_indices = torch.argsort(preds, descending=True)

        preds = preds[desc_score_indices]
        target = target[desc_score_indices]

        if sample_weights is not None:
            weight = sample_weights[desc_score_indices]
        else:
            # scalar 1.0 broadcasts in the cumsum below, avoiding an all-ones tensor
            weight = 1.0

        # pred typically has many tied values. Here we extract
        # the indices associated with the distinct values. We also
        # concatenate a value for the end of the curve.
        distinct_value_indices = torch.where(preds[1:] - preds[:-1])[0]
        threshold_idxs = F.pad(distinct_value_indices, [0, 1], value=target.size(0) - 1)
        # binarize target against pos_label so cumsum counts true positives
        target = (target == pos_label).to(torch.long)
        tps = torch.cumsum(target * weight, dim=0)[threshold_idxs]

        if sample_weights is not None:
            # express fps as a cumsum to ensure fps is increasing even in
            # the presence of floating point errors
            fps = torch.cumsum((1 - target) * weight, dim=0)[threshold_idxs]
        else:
            # unweighted: negatives above a threshold = (1-indexed position) - positives
            fps = 1 + threshold_idxs - tps

        return fps, tps, preds[threshold_idxs]
80
+
81
+
82
+ def _adjust_threshold_arg(
83
+ thresholds: Optional[Union[int, List[float], Tensor]] = None, device: Optional[torch.device] = None
84
+ ) -> Optional[Tensor]:
85
+ """Utility function for converting the threshold arg for list and int to tensor format."""
86
+ if isinstance(thresholds, int):
87
+ thresholds = torch.linspace(0, 1, thresholds, device=device)
88
+ if isinstance(thresholds, list):
89
+ thresholds = torch.tensor(thresholds, device=device)
90
+ return thresholds
91
+
92
+
93
+ def _binary_precision_recall_curve_arg_validation(
94
+ thresholds: Optional[Union[int, List[float], Tensor]] = None,
95
+ ignore_index: Optional[int] = None,
96
+ ) -> None:
97
+ """Validate non tensor input.
98
+
99
+ - ``threshold`` has to be None | a 1d tensor | a list of floats in the [0,1] range | an int
100
+ - ``ignore_index`` has to be None or int
101
+ """
102
+ if thresholds is not None and not isinstance(thresholds, (list, int, Tensor)):
103
+ raise ValueError(
104
+ "Expected argument `thresholds` to either be an integer, list of floats or"
105
+ f" tensor of floats, but got {thresholds}"
106
+ )
107
+ else:
108
+ if isinstance(thresholds, int) and thresholds < 2:
109
+ raise ValueError(
110
+ f"If argument `thresholds` is an integer, expected it to be larger than 1, but got {thresholds}"
111
+ )
112
+ if isinstance(thresholds, list) and not all(isinstance(t, float) and 0 <= t <= 1 for t in thresholds):
113
+ raise ValueError(
114
+ "If argument `thresholds` is a list, expected all elements to be floats in the [0,1] range,"
115
+ f" but got {thresholds}"
116
+ )
117
+ if isinstance(thresholds, Tensor) and not thresholds.ndim == 1:
118
+ raise ValueError("If argument `thresholds` is an tensor, expected the tensor to be 1d")
119
+
120
+ if ignore_index is not None and not isinstance(ignore_index, int):
121
+ raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
122
+
123
+
124
def _binary_precision_recall_curve_tensor_validation(
    preds: Tensor, target: Tensor, ignore_index: Optional[int] = None
) -> None:
    """Validate tensor input.

    - tensors have to be of same shape
    - all values in target tensor that are not ignored have to be in {0, 1}
    - that the pred tensor is floating point

    Args:
        preds: tensor with probability/logit scores
        target: tensor with ground truth labels
        ignore_index: optional extra target value that is allowed besides {0, 1}

    Raises:
        ValueError: if shapes mismatch or dtypes are wrong
        RuntimeError: if ``target`` contains values outside the allowed set
    """
    _check_same_shape(preds, target)

    if target.is_floating_point():
        raise ValueError(
            "Expected argument `target` to be an int or long tensor with ground truth labels"
            f" but got tensor with dtype {target.dtype}"
        )

    if not preds.is_floating_point():
        raise ValueError(
            "Expected argument `preds` to be an floating tensor with probability/logit scores,"
            f" but got tensor with dtype {preds.dtype}"
        )

    # Check that target only contains {0,1} values or value in ignore_index
    unique_values = torch.unique(target)
    if ignore_index is None:
        check = torch.any((unique_values != 0) & (unique_values != 1))
    else:
        check = torch.any((unique_values != 0) & (unique_values != 1) & (unique_values != ignore_index))
    if check:
        # BUGFIX: the conditional expression must be parenthesized. The previous
        # `[0,1] + [] if ignore_index is None else [ignore_index]` parsed as
        # `([0,1] + []) if ... else [ignore_index]`, so the message dropped 0 and 1
        # whenever ignore_index was set.
        raise RuntimeError(
            f"Detected the following values in `target`: {unique_values} but expected only"
            f" the following values {[0, 1] + ([] if ignore_index is None else [ignore_index])}."
        )
158
+
159
+
160
def _binary_precision_recall_curve_format(
    preds: Tensor,
    target: Tensor,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
) -> Tuple[Tensor, Tensor, Optional[Tensor]]:
    """Convert all input to the right format.

    - flattens additional dimensions
    - Remove all datapoints that should be ignored
    - Applies sigmoid if pred tensor not in [0,1] range
    - Format thresholds arg to be a tensor
    """
    preds, target = preds.flatten(), target.flatten()
    if ignore_index is not None:
        keep = target != ignore_index
        preds, target = preds[keep], target[keep]

    # Values outside [0,1] are interpreted as logits and mapped to probabilities.
    if not torch.all((preds >= 0) & (preds <= 1)):
        preds = preds.sigmoid()

    return preds, target, _adjust_threshold_arg(thresholds, preds.device)
185
+
186
+
187
+ def _binary_precision_recall_curve_update(
188
+ preds: Tensor,
189
+ target: Tensor,
190
+ thresholds: Optional[Tensor],
191
+ ) -> Union[Tensor, Tuple[Tensor, Tensor]]:
192
+ """Returns the state to calculate the pr-curve with.
193
+
194
+ If thresholds is `None` the direct preds and targets are used. If thresholds is not `None` we compute a multi
195
+ threshold confusion matrix.
196
+ """
197
+ if thresholds is None:
198
+ return preds, target
199
+ len_t = len(thresholds)
200
+ preds_t = (preds.unsqueeze(-1) >= thresholds.unsqueeze(0)).long() # num_samples x num_thresholds
201
+ unique_mapping = preds_t + 2 * target.unsqueeze(-1) + 4 * torch.arange(len_t, device=target.device)
202
+ bins = _bincount(unique_mapping.flatten(), minlength=4 * len_t)
203
+ return bins.reshape(len_t, 2, 2)
204
+
205
+
206
def _binary_precision_recall_curve_compute(
    state: Union[Tensor, Tuple[Tensor, Tensor]],
    thresholds: Optional[Tensor],
    pos_label: int = 1,
) -> Tuple[Tensor, Tensor, Tensor]:
    """Computes the final pr-curve.

    If state is a single tensor, then we calculate the pr-curve from a multi threshold confusion matrix. If state is
    original input, then we dynamically compute the binary classification curve.

    Args:
        state: either a ``(num_thresholds, 2, 2)`` multi-threshold confusion matrix (binned case)
            or the formatted ``(preds, target)`` tuple (non-binned case)
        thresholds: the thresholds tensor used for the binned case, otherwise ``None``
        pos_label: integer determining what the positive class in target tensor is
    """
    if isinstance(state, Tensor):
        # binned case: read the stat scores per threshold straight from the confusion matrices
        tps = state[:, 1, 1]
        fps = state[:, 0, 1]
        fns = state[:, 1, 0]
        precision = _safe_divide(tps, tps + fps)
        recall = _safe_divide(tps, tps + fns)
        # append the conventional (precision=1, recall=0) end point of the curve
        precision = torch.cat([precision, torch.ones(1, dtype=precision.dtype, device=precision.device)])
        recall = torch.cat([recall, torch.zeros(1, dtype=recall.dtype, device=recall.device)])
        return precision, recall, thresholds
    else:
        # non-binned case: thresholds come from the unique prediction values themselves
        fps, tps, thresholds = _binary_clf_curve(state[0], state[1], pos_label=pos_label)
        precision = tps / (tps + fps)
        recall = tps / tps[-1]

        # stop when full recall attained and reverse the outputs so recall is decreasing
        last_ind = torch.where(tps == tps[-1])[0][0]
        sl = slice(0, last_ind.item() + 1)

        # need to call reversed explicitly, since including that to slice would
        # introduce negative strides that are not yet supported in pytorch
        precision = torch.cat([reversed(precision[sl]), torch.ones(1, dtype=precision.dtype, device=precision.device)])
        recall = torch.cat([reversed(recall[sl]), torch.zeros(1, dtype=recall.dtype, device=recall.device)])
        thresholds = reversed(thresholds[sl]).detach().clone()  # type: ignore

        return precision, recall, thresholds
241
+
242
+
243
def binary_precision_recall_curve(
    preds: Tensor,
    target: Tensor,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tuple[Tensor, Tensor, Tensor]:
    r"""Computes the precision-recall curve for binary tasks. The curve consist of multiple pairs of precision and
    recall values evaluated at different thresholds, such that the tradeoff between the two values can been seen.

    Accepts the following input tensors:

    - ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
      observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
      sigmoid per element.
    - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
      only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class.

    Additional dimension ``...`` will be flattened into the batch dimension.

    The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
    that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
    non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
    argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
    size :math:`\mathcal{O}(n_{thresholds})` (constant memory).

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
              all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
              0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
              bins for the calculation.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        (tuple): a tuple of 3 tensors containing:

        - precision: an 1d tensor of size (n_thresholds+1, ) with precision values
        - recall: an 1d tensor of size (n_thresholds+1, ) with recall values
        - thresholds: an 1d tensor of size (n_thresholds, ) with increasing threshold values

    Example:
        >>> from torchmetrics.functional.classification import binary_precision_recall_curve
        >>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
        >>> target = torch.tensor([0, 1, 1, 0])
        >>> binary_precision_recall_curve(preds, target, thresholds=None)  # doctest: +NORMALIZE_WHITESPACE
        (tensor([0.6667, 0.5000, 0.0000, 1.0000]),
         tensor([1.0000, 0.5000, 0.0000, 0.0000]),
         tensor([0.5000, 0.7000, 0.8000]))
        >>> binary_precision_recall_curve(preds, target, thresholds=5)  # doctest: +NORMALIZE_WHITESPACE
        (tensor([0.5000, 0.6667, 0.6667, 0.0000, 0.0000, 1.0000]),
         tensor([1., 1., 1., 0., 0., 0.]),
         tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
    """
    # Validate (optional), normalize inputs/thresholds, build the state, then compute the curve.
    if validate_args:
        _binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
        _binary_precision_recall_curve_tensor_validation(preds, target, ignore_index)
    preds, target, thresholds = _binary_precision_recall_curve_format(preds, target, thresholds, ignore_index)
    state = _binary_precision_recall_curve_update(preds, target, thresholds)
    return _binary_precision_recall_curve_compute(state, thresholds)
312
+
313
+
314
+ def _multiclass_precision_recall_curve_arg_validation(
315
+ num_classes: int,
316
+ thresholds: Optional[Union[int, List[float], Tensor]] = None,
317
+ ignore_index: Optional[int] = None,
318
+ ) -> None:
319
+ """Validate non tensor input.
320
+
321
+ - ``num_classes`` has to be an int larger than 1
322
+ - ``threshold`` has to be None | a 1d tensor | a list of floats in the [0,1] range | an int
323
+ - ``ignore_index`` has to be None or int
324
+ """
325
+ if not isinstance(num_classes, int) or num_classes < 2:
326
+ raise ValueError(f"Expected argument `num_classes` to be an integer larger than 1, but got {num_classes}")
327
+ _binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
328
+
329
+
330
+ def _multiclass_precision_recall_curve_tensor_validation(
331
+ preds: Tensor, target: Tensor, num_classes: int, ignore_index: Optional[int] = None
332
+ ) -> None:
333
+ """Validate tensor input.
334
+
335
+ - target should have one more dimension than preds and all dimensions except for preds.shape[1] should match
336
+ exactly. preds.shape[1] should have size equal to number of classes
337
+ - all values in target tensor that are not ignored have to be in {0, 1}
338
+ """
339
+ if not preds.ndim == target.ndim + 1:
340
+ raise ValueError(
341
+ f"Expected `preds` to have one more dimension than `target` but got {preds.ndim} and {target.ndim}"
342
+ )
343
+ if target.is_floating_point():
344
+ raise ValueError(
345
+ f"Expected argument `target` to be an int or long tensor, but got tensor with dtype {target.dtype}"
346
+ )
347
+ if not preds.is_floating_point():
348
+ raise ValueError(f"Expected `preds` to be a float tensor, but got {preds.dtype}")
349
+ if preds.shape[1] != num_classes:
350
+ raise ValueError(
351
+ "Expected `preds.shape[1]` to be equal to the number of classes but"
352
+ f" got {preds.shape[1]} and {num_classes}."
353
+ )
354
+ if preds.shape[0] != target.shape[0] or preds.shape[2:] != target.shape[1:]:
355
+ raise ValueError(
356
+ "Expected the shape of `preds` should be (N, C, ...) and the shape of `target` should be (N, ...)"
357
+ f" but got {preds.shape} and {target.shape}"
358
+ )
359
+
360
+ num_unique_values = len(torch.unique(target))
361
+ if ignore_index is None:
362
+ check = num_unique_values > num_classes
363
+ else:
364
+ check = num_unique_values > num_classes + 1
365
+ if check:
366
+ raise RuntimeError(
367
+ "Detected more unique values in `target` than `num_classes`. Expected only "
368
+ f"{num_classes if ignore_index is None else num_classes + 1} but found "
369
+ f"{num_unique_values} in `target`."
370
+ )
371
+
372
+
373
def _multiclass_precision_recall_curve_format(
    preds: Tensor,
    target: Tensor,
    num_classes: int,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
) -> Tuple[Tensor, Tensor, Optional[Tensor]]:
    """Convert multiclass inputs to the canonical format.

    - flattens extra dimensions so ``preds`` becomes (num_samples, num_classes) and ``target`` 1d
    - removes all datapoints equal to ``ignore_index``
    - applies softmax when ``preds`` does not already hold probabilities
    - converts the ``thresholds`` argument to a tensor (or None)
    """
    preds = preds.transpose(0, 1).reshape(num_classes, -1).T
    target = target.flatten()

    if ignore_index is not None:
        keep = target != ignore_index
        preds, target = preds[keep], target[keep]

    # Any value outside [0, 1] means the input is logits; normalize per sample.
    if not torch.all((preds >= 0) * (preds <= 1)):
        preds = preds.softmax(1)

    thresholds = _adjust_threshold_arg(thresholds, preds.device)
    return preds, target, thresholds
400
+
401
+
402
def _multiclass_precision_recall_curve_update(
    preds: Tensor,
    target: Tensor,
    num_classes: int,
    thresholds: Optional[Tensor],
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
    """Return the state the multiclass pr-curve is computed from.

    When ``thresholds`` is None the raw ``(preds, target)`` pair is the state; otherwise a
    multi-threshold confusion matrix of shape (num_thresholds, num_classes, 2, 2) is accumulated.
    """
    if thresholds is None:
        return preds, target
    num_th = len(thresholds)
    # Binarize every prediction at every threshold: (num_samples, num_classes, num_thresholds).
    preds_t = (preds.unsqueeze(-1) >= thresholds.unsqueeze(0).unsqueeze(0)).long()
    target_t = torch.nn.functional.one_hot(target, num_classes=num_classes)
    # Encode each (prediction, target, class, threshold) combination into one flat bin index.
    unique_mapping = preds_t + 2 * target_t.unsqueeze(-1)
    unique_mapping += 4 * torch.arange(num_classes, device=preds.device).unsqueeze(0).unsqueeze(-1)
    unique_mapping += 4 * num_classes * torch.arange(num_th, device=preds.device)
    bins = _bincount(unique_mapping.flatten(), minlength=4 * num_classes * num_th)
    return bins.reshape(num_th, num_classes, 2, 2)
424
+
425
+
426
def _multiclass_precision_recall_curve_compute(
    state: Union[Tensor, Tuple[Tensor, Tensor]],
    num_classes: int,
    thresholds: Optional[Tensor],
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
    """Compute the final multiclass pr-curve.

    A tensor ``state`` is interpreted as a multi-threshold confusion matrix; any other state is the
    original ``(preds, target)`` pair, for which the binary curve is derived per class.
    """
    if not isinstance(state, Tensor):
        # Non-binned path: one exact binary curve per class, lengths may differ.
        precision, recall, thresholds = [], [], []
        for cls in range(num_classes):
            res = _binary_precision_recall_curve_compute([state[0][:, cls], state[1]], thresholds=None, pos_label=cls)
            precision.append(res[0])
            recall.append(res[1])
            thresholds.append(res[2])
        return precision, recall, thresholds

    tps = state[:, :, 1, 1]
    fps = state[:, :, 0, 1]
    fns = state[:, :, 1, 0]
    precision = _safe_divide(tps, tps + fps)
    recall = _safe_divide(tps, tps + fns)
    # Append the (precision=1, recall=0) endpoint so every curve terminates in the same corner.
    one_row = torch.ones(1, num_classes, dtype=precision.dtype, device=precision.device)
    zero_row = torch.zeros(1, num_classes, dtype=recall.dtype, device=recall.device)
    return torch.cat([precision, one_row]).T, torch.cat([recall, zero_row]).T, thresholds
453
+
454
+
455
def multiclass_precision_recall_curve(
    preds: Tensor,
    target: Tensor,
    num_classes: int,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
    r"""Computes the precision-recall curve for multiclass tasks. The curve consist of multiple pairs of precision
    and recall values evaluated at different thresholds, such that the tradeoff between the two values can been
    seen.

    Accepts the following input tensors:

    - ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
      observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
      softmax per sample.
    - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
      only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).

    Additional dimension ``...`` will be flattened into the batch dimension.

    The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
    that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
    non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
    argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
    size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_classes: Integer specifing the number of classes
        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
              all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
              0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
              bins for the calculation.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        (tuple): a tuple of either 3 tensors or 3 lists containing

        - precision: if `thresholds=None` a list for each class is returned with an 1d tensor of size (n_thresholds+1, )
          with precision values (length may differ between classes). If `thresholds` is set to something else,
          then a single 2d tensor of size (n_classes, n_thresholds+1) with precision values is returned.
        - recall: if `thresholds=None` a list for each class is returned with an 1d tensor of size (n_thresholds+1, )
          with recall values (length may differ between classes). If `thresholds` is set to something else,
          then a single 2d tensor of size (n_classes, n_thresholds+1) with recall values is returned.
        - thresholds: if `thresholds=None` a list for each class is returned with an 1d tensor of size (n_thresholds, )
          with increasing threshold values (length may differ between classes). If `threshold` is set to something else,
          then a single 1d tensor of size (n_thresholds, ) is returned with shared threshold values for all classes.

    Example:
        >>> from torchmetrics.functional.classification import multiclass_precision_recall_curve
        >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
        ...                       [0.05, 0.75, 0.05, 0.05, 0.05],
        ...                       [0.05, 0.05, 0.75, 0.05, 0.05],
        ...                       [0.05, 0.05, 0.05, 0.75, 0.05]])
        >>> target = torch.tensor([0, 1, 3, 2])
        >>> precision, recall, thresholds = multiclass_precision_recall_curve(
        ...    preds, target, num_classes=5, thresholds=None
        ... )
        >>> precision  # doctest: +NORMALIZE_WHITESPACE
        [tensor([1., 1.]), tensor([1., 1.]), tensor([0.2500, 0.0000, 1.0000]),
         tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
        >>> recall
        [tensor([1., 0.]), tensor([1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
        >>> thresholds
        [tensor([0.7500]), tensor([0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500])]
        >>> multiclass_precision_recall_curve(
        ...     preds, target, num_classes=5, thresholds=5
        ... )  # doctest: +NORMALIZE_WHITESPACE
        (tensor([[0.2500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000],
                 [0.2500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000],
                 [0.2500, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000],
                 [0.2500, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000],
                 [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000]]),
         tensor([[1., 1., 1., 1., 0., 0.],
                 [1., 1., 1., 1., 0., 0.],
                 [1., 0., 0., 0., 0., 0.],
                 [1., 0., 0., 0., 0., 0.],
                 [0., 0., 0., 0., 0., 0.]]),
         tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
    """
    if validate_args:
        _multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index)
        _multiclass_precision_recall_curve_tensor_validation(preds, target, num_classes, ignore_index)
    preds, target, thresholds = _multiclass_precision_recall_curve_format(
        preds, target, num_classes, thresholds, ignore_index
    )
    state = _multiclass_precision_recall_curve_update(preds, target, num_classes, thresholds)
    return _multiclass_precision_recall_curve_compute(state, num_classes, thresholds)
554
+
555
+
556
def _multilabel_precision_recall_curve_arg_validation(
    num_labels: int,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
) -> None:
    """Validate the non-tensor arguments of the multilabel precision-recall curve.

    Same constraints as the multiclass case, applied to ``num_labels``:

    - ``num_labels`` has to be an int larger than 1
    - ``thresholds`` has to be None, a 1d tensor, a list of floats in [0,1], or an int
    - ``ignore_index`` has to be None or an int
    """
    # The multiclass validator implements exactly the checks needed here.
    _multiclass_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index)
568
+
569
+
570
def _multilabel_precision_recall_curve_tensor_validation(
    preds: Tensor, target: Tensor, num_labels: int, ignore_index: Optional[int] = None
) -> None:
    """Validate the tensor inputs of the multilabel precision-recall curve.

    Reuses the binary checks (matching shapes, floating point ``preds``, ``target`` in {0,1} up to
    ``ignore_index``) and additionally requires ``preds.shape[1]`` to equal ``num_labels``.
    """
    _binary_precision_recall_curve_tensor_validation(preds, target, ignore_index)
    if preds.shape[1] != num_labels:
        raise ValueError(
            "Expected both `target.shape[1]` and `preds.shape[1]` to be equal to the number of labels"
            f" but got {preds.shape[1]} and expected {num_labels}"
        )
586
+
587
+
588
def _multilabel_precision_recall_curve_format(
    preds: Tensor,
    target: Tensor,
    num_labels: int,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
) -> Tuple[Tensor, Tensor, Optional[Tensor]]:
    """Convert multilabel inputs to the canonical format.

    - flattens extra dimensions so both tensors become (num_samples, num_labels)
    - applies sigmoid when ``preds`` is not already in the [0,1] range
    - converts the ``thresholds`` argument to a tensor (or None)
    - in the binned case, masks ignored datapoints with a negative sentinel value so the update
      step can filter them out after index-encoding
    """
    preds = preds.transpose(0, 1).reshape(num_labels, -1).T
    target = target.transpose(0, 1).reshape(num_labels, -1).T
    if not torch.all((preds >= 0) * (preds <= 1)):
        preds = preds.sigmoid()

    thresholds = _adjust_threshold_arg(thresholds, preds.device)
    if ignore_index is not None and thresholds is not None:
        preds = preds.clone()
        target = target.clone()
        # The sentinel is more negative than any encoded index can reach, so the encoded value of
        # an ignored entry stays negative and can be dropped in the update step.
        sentinel = -4 * num_labels * len(thresholds)
        mask = target == ignore_index
        preds[mask] = sentinel
        target[mask] = sentinel

    return preds, target, thresholds
617
+
618
+
619
def _multilabel_precision_recall_curve_update(
    preds: Tensor,
    target: Tensor,
    num_labels: int,
    thresholds: Optional[Tensor],
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
    """Return the state the multilabel pr-curve is computed from.

    When ``thresholds`` is None the raw ``(preds, target)`` pair is the state; otherwise a
    multi-threshold confusion matrix of shape (num_thresholds, num_labels, 2, 2) is accumulated,
    dropping the negative sentinel indices produced by the format step for ignored entries.
    """
    if thresholds is None:
        return preds, target
    num_th = len(thresholds)
    # Binarize every prediction at every threshold: (num_samples, num_labels, num_thresholds).
    preds_t = (preds.unsqueeze(-1) >= thresholds.unsqueeze(0).unsqueeze(0)).long()
    # Encode each (prediction, target, label, threshold) combination into one flat bin index.
    unique_mapping = preds_t + 2 * target.unsqueeze(-1)
    unique_mapping += 4 * torch.arange(num_labels, device=preds.device).unsqueeze(0).unsqueeze(-1)
    unique_mapping += 4 * num_labels * torch.arange(num_th, device=preds.device)
    # Ignored datapoints were masked with a negative sentinel and still encode negative here.
    unique_mapping = unique_mapping[unique_mapping >= 0]
    bins = _bincount(unique_mapping, minlength=4 * num_labels * num_th)
    return bins.reshape(num_th, num_labels, 2, 2)
641
+
642
+
643
def _multilabel_precision_recall_curve_compute(
    state: Union[Tensor, Tuple[Tensor, Tensor]],
    num_labels: int,
    thresholds: Optional[Tensor],
    ignore_index: Optional[int] = None,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
    """Compute the final multilabel pr-curve.

    A tensor ``state`` is interpreted as a multi-threshold confusion matrix; any other state is the
    original ``(preds, target)`` pair, for which the binary curve is derived per label after
    dropping entries equal to ``ignore_index``.
    """
    if not isinstance(state, Tensor):
        # Non-binned path: one exact binary curve per label, lengths may differ.
        precision, recall, thresholds = [], [], []
        for lbl in range(num_labels):
            lbl_preds = state[0][:, lbl]
            lbl_target = state[1][:, lbl]
            if ignore_index is not None:
                keep = lbl_target != ignore_index
                lbl_preds, lbl_target = lbl_preds[keep], lbl_target[keep]
            res = _binary_precision_recall_curve_compute([lbl_preds, lbl_target], thresholds=None, pos_label=1)
            precision.append(res[0])
            recall.append(res[1])
            thresholds.append(res[2])
        return precision, recall, thresholds

    tps = state[:, :, 1, 1]
    fps = state[:, :, 0, 1]
    fns = state[:, :, 1, 0]
    precision = _safe_divide(tps, tps + fps)
    recall = _safe_divide(tps, tps + fns)
    # Append the (precision=1, recall=0) endpoint so every curve terminates in the same corner.
    one_row = torch.ones(1, num_labels, dtype=precision.dtype, device=precision.device)
    zero_row = torch.zeros(1, num_labels, dtype=recall.dtype, device=recall.device)
    return torch.cat([precision, one_row]).T, torch.cat([recall, zero_row]).T, thresholds
677
+
678
+
679
def multilabel_precision_recall_curve(
    preds: Tensor,
    target: Tensor,
    num_labels: int,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
    r"""Computes the precision-recall curve for multilabel tasks. The curve consist of multiple pairs of precision
    and recall values evaluated at different thresholds, such that the tradeoff between the two values can been
    seen.

    Accepts the following input tensors:

    - ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
      observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
      sigmoid per element.
    - ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore
      only contain {0,1} values (except if `ignore_index` is specified).

    Additional dimension ``...`` will be flattened into the batch dimension.

    The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
    that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
    non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
    argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
    size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_labels: Integer specifing the number of labels
        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
              all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
              0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
              bins for the calculation.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        (tuple): a tuple of either 3 tensors or 3 lists containing

        - precision: if `thresholds=None` a list for each label is returned with an 1d tensor of size (n_thresholds+1, )
          with precision values (length may differ between labels). If `thresholds` is set to something else,
          then a single 2d tensor of size (n_labels, n_thresholds+1) with precision values is returned.
        - recall: if `thresholds=None` a list for each label is returned with an 1d tensor of size (n_thresholds+1, )
          with recall values (length may differ between labels). If `thresholds` is set to something else,
          then a single 2d tensor of size (n_labels, n_thresholds+1) with recall values is returned.
        - thresholds: if `thresholds=None` a list for each label is returned with an 1d tensor of size (n_thresholds, )
          with increasing threshold values (length may differ between labels). If `threshold` is set to something else,
          then a single 1d tensor of size (n_thresholds, ) is returned with shared threshold values for all labels.

    Example:
        >>> from torchmetrics.functional.classification import multilabel_precision_recall_curve
        >>> preds = torch.tensor([[0.75, 0.05, 0.35],
        ...                       [0.45, 0.75, 0.05],
        ...                       [0.05, 0.55, 0.75],
        ...                       [0.05, 0.65, 0.05]])
        >>> target = torch.tensor([[1, 0, 1],
        ...                        [0, 0, 0],
        ...                        [0, 1, 1],
        ...                        [1, 1, 1]])
        >>> precision, recall, thresholds = multilabel_precision_recall_curve(
        ...    preds, target, num_labels=3, thresholds=None
        ... )
        >>> precision  # doctest: +NORMALIZE_WHITESPACE
        [tensor([0.5000, 0.5000, 1.0000, 1.0000]), tensor([0.6667, 0.5000, 0.0000, 1.0000]),
         tensor([0.7500, 1.0000, 1.0000, 1.0000])]
        >>> recall  # doctest: +NORMALIZE_WHITESPACE
        [tensor([1.0000, 0.5000, 0.5000, 0.0000]), tensor([1.0000, 0.5000, 0.0000, 0.0000]),
         tensor([1.0000, 0.6667, 0.3333, 0.0000])]
        >>> thresholds  # doctest: +NORMALIZE_WHITESPACE
        [tensor([0.0500, 0.4500, 0.7500]), tensor([0.5500, 0.6500, 0.7500]),
         tensor([0.0500, 0.3500, 0.7500])]
        >>> multilabel_precision_recall_curve(
        ...     preds, target, num_labels=3, thresholds=5
        ... )  # doctest: +NORMALIZE_WHITESPACE
        (tensor([[0.5000, 0.5000, 1.0000, 1.0000, 0.0000, 1.0000],
                 [0.5000, 0.6667, 0.6667, 0.0000, 0.0000, 1.0000],
                 [0.7500, 1.0000, 1.0000, 1.0000, 0.0000, 1.0000]]),
         tensor([[1.0000, 0.5000, 0.5000, 0.5000, 0.0000, 0.0000],
                 [1.0000, 1.0000, 1.0000, 0.0000, 0.0000, 0.0000],
                 [1.0000, 0.6667, 0.3333, 0.3333, 0.0000, 0.0000]]),
         tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
    """
    if validate_args:
        _multilabel_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index)
        _multilabel_precision_recall_curve_tensor_validation(preds, target, num_labels, ignore_index)
    preds, target, thresholds = _multilabel_precision_recall_curve_format(
        preds, target, num_labels, thresholds, ignore_index
    )
    state = _multilabel_precision_recall_curve_update(preds, target, num_labels, thresholds)
    return _multilabel_precision_recall_curve_compute(state, num_labels, thresholds, ignore_index)
779
+
780
+
781
def precision_recall_curve(
    preds: Tensor,
    target: Tensor,
    task: Literal["binary", "multiclass", "multilabel"],
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    num_classes: Optional[int] = None,
    num_labels: Optional[int] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
    r"""Computes the precision-recall curve. The curve consist of multiple pairs of precision and recall values
    evaluated at different thresholds, such that the tradeoff between the two values can been seen.

    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
    :func:`binary_precision_recall_curve`, :func:`multiclass_precision_recall_curve` and
    :func:`multilabel_precision_recall_curve` for the specific details of each argument influence and examples.

    Raises:
        ValueError:
            If ``task`` is not one of ``'binary'``, ``'multiclass'`` or ``'multilabel'``, or if the matching
            ``num_classes``/``num_labels`` argument is not provided as an integer.

    Legacy Example:
        >>> pred = torch.tensor([0.0, 1.0, 2.0, 3.0])
        >>> target = torch.tensor([0, 1, 1, 0])
        >>> precision, recall, thresholds = precision_recall_curve(pred, target, task='binary')
        >>> precision
        tensor([0.6667, 0.5000, 0.0000, 1.0000])
        >>> recall
        tensor([1.0000, 0.5000, 0.0000, 0.0000])
        >>> thresholds
        tensor([0.7311, 0.8808, 0.9526])

        >>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
        ...                      [0.05, 0.75, 0.05, 0.05, 0.05],
        ...                      [0.05, 0.05, 0.75, 0.05, 0.05],
        ...                      [0.05, 0.05, 0.05, 0.75, 0.05]])
        >>> target = torch.tensor([0, 1, 3, 2])
        >>> precision, recall, thresholds = precision_recall_curve(pred, target, task='multiclass', num_classes=5)
        >>> precision
        [tensor([1., 1.]), tensor([1., 1.]), tensor([0.2500, 0.0000, 1.0000]),
         tensor([0.2500, 0.0000, 1.0000]), tensor([0., 1.])]
        >>> recall
        [tensor([1., 0.]), tensor([1., 0.]), tensor([1., 0., 0.]), tensor([1., 0., 0.]), tensor([nan, 0.])]
        >>> thresholds
        [tensor([0.7500]), tensor([0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500, 0.7500]), tensor([0.0500])]
    """
    if task == "binary":
        return binary_precision_recall_curve(preds, target, thresholds, ignore_index, validate_args)
    if task == "multiclass":
        # Explicit raise instead of `assert`: asserts are stripped under `python -O`, which would
        # turn a missing `num_classes` into an opaque downstream TypeError.
        if not isinstance(num_classes, int):
            raise ValueError(f"Expected argument `num_classes` to be an integer, but got {num_classes}")
        return multiclass_precision_recall_curve(preds, target, num_classes, thresholds, ignore_index, validate_args)
    if task == "multilabel":
        if not isinstance(num_labels, int):
            raise ValueError(f"Expected argument `num_labels` to be an integer, but got {num_labels}")
        return multilabel_precision_recall_curve(preds, target, num_labels, thresholds, ignore_index, validate_args)
    raise ValueError(
        f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
    )
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/recall_at_fixed_precision.py ADDED
@@ -0,0 +1,401 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import List, Optional, Tuple, Union
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.functional.classification.precision_recall_curve import (
21
+ _binary_precision_recall_curve_arg_validation,
22
+ _binary_precision_recall_curve_compute,
23
+ _binary_precision_recall_curve_format,
24
+ _binary_precision_recall_curve_tensor_validation,
25
+ _binary_precision_recall_curve_update,
26
+ _multiclass_precision_recall_curve_arg_validation,
27
+ _multiclass_precision_recall_curve_compute,
28
+ _multiclass_precision_recall_curve_format,
29
+ _multiclass_precision_recall_curve_tensor_validation,
30
+ _multiclass_precision_recall_curve_update,
31
+ _multilabel_precision_recall_curve_arg_validation,
32
+ _multilabel_precision_recall_curve_compute,
33
+ _multilabel_precision_recall_curve_format,
34
+ _multilabel_precision_recall_curve_tensor_validation,
35
+ _multilabel_precision_recall_curve_update,
36
+ )
37
+
38
+
39
+ def _recall_at_precision(
40
+ precision: Tensor,
41
+ recall: Tensor,
42
+ thresholds: Tensor,
43
+ min_precision: float,
44
+ ) -> Tuple[Tensor, Tensor]:
45
+ try:
46
+ max_recall, _, best_threshold = max(
47
+ (r, p, t) for p, r, t in zip(precision, recall, thresholds) if p >= min_precision
48
+ )
49
+
50
+ except ValueError:
51
+ max_recall = torch.tensor(0.0, device=recall.device, dtype=recall.dtype)
52
+ best_threshold = torch.tensor(0)
53
+
54
+ if max_recall == 0.0:
55
+ best_threshold = torch.tensor(1e6, device=thresholds.device, dtype=thresholds.dtype)
56
+
57
+ return max_recall, best_threshold
58
+
59
+
60
def _binary_recall_at_fixed_precision_arg_validation(
    min_precision: float,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
) -> None:
    """Validate non tensor input.

    ``thresholds`` and ``ignore_index`` are checked by the shared binary pr-curve validator;
    ``min_precision`` must be a float in the [0,1] range.
    """
    _binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
    # Bug fix: the original joined the two conditions with `and`, which accepted any float
    # (e.g. min_precision=1.5) as well as any non-float inside [0,1] (e.g. the int 1).
    if not isinstance(min_precision, float) or not (0 <= min_precision <= 1):
        raise ValueError(
            f"Expected argument `min_precision` to be an float in the [0,1] range, but got {min_precision}"
        )
70
+
71
+
72
def _binary_recall_at_fixed_precision_compute(
    state: Union[Tensor, Tuple[Tensor, Tensor]],
    thresholds: Optional[Tensor],
    min_precision: float,
    pos_label: int = 1,
) -> Tuple[Tensor, Tensor]:
    """Derive the binary pr-curve from ``state`` and pick the best recall at ``min_precision``."""
    curve = _binary_precision_recall_curve_compute(state, thresholds, pos_label)
    return _recall_at_precision(curve[0], curve[1], curve[2], min_precision)
80
+
81
+
82
def binary_recall_at_fixed_precision(
    preds: Tensor,
    target: Tensor,
    min_precision: float,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tuple[Tensor, Tensor]:
    r"""Computes the highest possible recall value given the minimum precision thresholds provided for binary tasks.
    This is done by first calculating the precision-recall curve for different thresholds and the find the recall
    for a given precision level.

    Accepts the following input tensors:

    - ``preds`` (float tensor): ``(N, ...)``. Preds should be a tensor containing probabilities or logits for each
      observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
      sigmoid per element.
    - ``target`` (int tensor): ``(N, ...)``. Target should be a tensor containing ground truth labels, and therefore
      only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the positive class.

    Additional dimension ``...`` will be flattened into the batch dimension.

    The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
    that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
    non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
    argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
    size :math:`\mathcal{O}(n_{thresholds})` (constant memory).

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        min_precision: float value specifying minimum precision threshold.
        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
              all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
              0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
              bins for the calculation.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        (tuple): a tuple of 2 tensors containing:

        - recall: an scalar tensor with the maximum recall for the given precision level
        - threshold: an scalar tensor with the corresponding threshold level

    Example:
        >>> from torchmetrics.functional.classification import binary_recall_at_fixed_precision
        >>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
        >>> target = torch.tensor([0, 1, 1, 0])
        >>> binary_recall_at_fixed_precision(preds, target, min_precision=0.5, thresholds=None)
        (tensor(1.), tensor(0.5000))
        >>> binary_recall_at_fixed_precision(preds, target, min_precision=0.5, thresholds=5)
        (tensor(1.), tensor(0.5000))
    """
    if validate_args:
        _binary_recall_at_fixed_precision_arg_validation(min_precision, thresholds, ignore_index)
        _binary_precision_recall_curve_tensor_validation(preds, target, ignore_index)
    # The state is exactly the binary pr-curve state; only the compute step differs.
    preds, target, thresholds = _binary_precision_recall_curve_format(preds, target, thresholds, ignore_index)
    state = _binary_precision_recall_curve_update(preds, target, thresholds)
    return _binary_recall_at_fixed_precision_compute(state, thresholds, min_precision)
149
+
150
+
151
def _multiclass_recall_at_fixed_precision_arg_validation(
    num_classes: int,
    min_precision: float,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
) -> None:
    """Validate the arguments of ``multiclass_recall_at_fixed_precision``.

    Delegates validation of ``num_classes``, ``thresholds`` and ``ignore_index`` to the
    precision-recall-curve validator and additionally checks that ``min_precision`` is a
    float in the [0, 1] range.

    Raises:
        ValueError: If ``min_precision`` is not a float or lies outside the [0, 1] range.
    """
    _multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index)
    # Bug fix: the original combined the two conditions with `and`, so the error was only
    # raised when *both* failed — e.g. an out-of-range float like 1.5 passed silently.
    if not isinstance(min_precision, float) or not (0 <= min_precision <= 1):
        raise ValueError(
            f"Expected argument `min_precision` to be an float in the [0,1] range, but got {min_precision}"
        )
162
+
163
+
164
def _multiclass_recall_at_fixed_precision_arg_compute(
    state: Union[Tensor, Tuple[Tensor, Tensor]],
    num_classes: int,
    thresholds: Optional[Tensor],
    min_precision: float,
) -> Tuple[Tensor, Tensor]:
    """Select, per class, the best recall achievable at ``min_precision`` from the PR curve."""
    precision, recall, thresholds = _multiclass_precision_recall_curve_compute(state, num_classes, thresholds)
    if isinstance(state, Tensor):
        # Binned mode: the same threshold grid is shared by every class.
        per_class = [_recall_at_precision(p, r, thresholds, min_precision) for p, r in zip(precision, recall)]
    else:
        # Non-binned mode: every class carries its own threshold tensor.
        per_class = [
            _recall_at_precision(p, r, t, min_precision) for p, r, t in zip(precision, recall, thresholds)
        ]
    best_recall = torch.stack([pair[0] for pair in per_class])
    best_thresholds = torch.stack([pair[1] for pair in per_class])
    return best_recall, best_thresholds
178
+
179
+
180
def multiclass_recall_at_fixed_precision(
    preds: Tensor,
    target: Tensor,
    num_classes: int,
    min_precision: float,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tuple[Tensor, Tensor]:
    r"""Compute the highest recall reachable at a minimum precision level for multiclass tasks.

    The precision-recall curve is computed first and the best recall whose precision is at
    least ``min_precision`` is then selected per class, together with the matching threshold.

    Accepts the following input tensors:

    - ``preds`` (float tensor): ``(N, C, ...)``. Probabilities or logits (softmax is
      auto-applied per sample if values fall outside the [0,1] range).
    - ``target`` (int tensor): ``(N, ...)``. Ground truth labels in the [0, n_classes-1]
      range (except if `ignore_index` is specified).

    Additional dimension ``...`` will be flattened into the batch dimension.

    Setting ``thresholds`` to `None` uses an exact, non-binned calculation with
    :math:`\mathcal{O}(n_{samples})` memory; an int / list / 1d tensor of thresholds instead
    activates a binned calculation with constant
    :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` memory.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_classes: Integer specifying the number of classes
        min_precision: float value specifying minimum precision threshold.
        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically
              calculated from all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly
              spaced from 0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins.
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor
              as bins for the calculation.

        ignore_index: Specifies a target value that is ignored and does not contribute to the
            metric calculation.
        validate_args: bool indicating if input arguments and tensors should be validated for
            correctness. Set to ``False`` for faster computations.

    Returns:
        (tuple): a tuple of 2 tensors containing:

        - recall: an 1d tensor of size (n_classes, ) with the maximum recall for the given
          precision level per class
        - thresholds: an 1d tensor of size (n_classes, ) with the corresponding threshold
          level per class

    Example:
        >>> from torchmetrics.functional.classification import multiclass_recall_at_fixed_precision
        >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
        ...                       [0.05, 0.75, 0.05, 0.05, 0.05],
        ...                       [0.05, 0.05, 0.75, 0.05, 0.05],
        ...                       [0.05, 0.05, 0.05, 0.75, 0.05]])
        >>> target = torch.tensor([0, 1, 3, 2])
        >>> multiclass_recall_at_fixed_precision(preds, target, num_classes=5, min_precision=0.5, thresholds=None)
        (tensor([1., 1., 0., 0., 0.]), tensor([7.5000e-01, 7.5000e-01, 1.0000e+06, 1.0000e+06, 1.0000e+06]))
        >>> multiclass_recall_at_fixed_precision(preds, target, num_classes=5, min_precision=0.5, thresholds=5)
        (tensor([1., 1., 0., 0., 0.]), tensor([7.5000e-01, 7.5000e-01, 1.0000e+06, 1.0000e+06, 1.0000e+06]))
    """
    if validate_args:
        _multiclass_recall_at_fixed_precision_arg_validation(num_classes, min_precision, thresholds, ignore_index)
        _multiclass_precision_recall_curve_tensor_validation(preds, target, num_classes, ignore_index)
    preds, target, thresholds = _multiclass_precision_recall_curve_format(
        preds, target, num_classes, thresholds, ignore_index
    )
    curve_state = _multiclass_precision_recall_curve_update(preds, target, num_classes, thresholds)
    return _multiclass_recall_at_fixed_precision_arg_compute(curve_state, num_classes, thresholds, min_precision)
254
+
255
+
256
def _multilabel_recall_at_fixed_precision_arg_validation(
    num_labels: int,
    min_precision: float,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
) -> None:
    """Validate the arguments of ``multilabel_recall_at_fixed_precision``.

    Delegates validation of ``num_labels``, ``thresholds`` and ``ignore_index`` to the
    precision-recall-curve validator and additionally checks that ``min_precision`` is a
    float in the [0, 1] range.

    Raises:
        ValueError: If ``min_precision`` is not a float or lies outside the [0, 1] range.
    """
    _multilabel_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index)
    # Bug fix: the original combined the two conditions with `and`, so the error was only
    # raised when *both* failed — e.g. an out-of-range float like 1.5 passed silently.
    if not isinstance(min_precision, float) or not (0 <= min_precision <= 1):
        raise ValueError(
            f"Expected argument `min_precision` to be an float in the [0,1] range, but got {min_precision}"
        )
267
+
268
+
269
def _multilabel_recall_at_fixed_precision_arg_compute(
    state: Union[Tensor, Tuple[Tensor, Tensor]],
    num_labels: int,
    thresholds: Optional[Tensor],
    ignore_index: Optional[int],
    min_precision: float,
) -> Tuple[Tensor, Tensor]:
    """Select, per label, the best recall achievable at ``min_precision`` from the PR curve."""
    precision, recall, thresholds = _multilabel_precision_recall_curve_compute(
        state, num_labels, thresholds, ignore_index
    )
    if isinstance(state, Tensor):
        # Binned mode: the same threshold grid is shared by every label.
        per_label = [_recall_at_precision(p, r, thresholds, min_precision) for p, r in zip(precision, recall)]
    else:
        # Non-binned mode: every label carries its own threshold tensor.
        per_label = [
            _recall_at_precision(p, r, t, min_precision) for p, r, t in zip(precision, recall, thresholds)
        ]
    best_recall = torch.stack([pair[0] for pair in per_label])
    best_thresholds = torch.stack([pair[1] for pair in per_label])
    return best_recall, best_thresholds
286
+
287
+
288
def multilabel_recall_at_fixed_precision(
    preds: Tensor,
    target: Tensor,
    num_labels: int,
    min_precision: float,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tuple[Tensor, Tensor]:
    r"""Compute the highest recall reachable at a minimum precision level for multilabel tasks.

    The precision-recall curve is computed first and the best recall whose precision is at
    least ``min_precision`` is then selected per label, together with the matching threshold.

    Accepts the following input tensors:

    - ``preds`` (float tensor): ``(N, C, ...)``. Probabilities or logits (sigmoid is
      auto-applied per element if values fall outside the [0,1] range).
    - ``target`` (int tensor): ``(N, C, ...)``. Ground truth labels in {0,1} (except if
      `ignore_index` is specified).

    Additional dimension ``...`` will be flattened into the batch dimension.

    Setting ``thresholds`` to `None` uses an exact, non-binned calculation with
    :math:`\mathcal{O}(n_{samples})` memory; an int / list / 1d tensor of thresholds instead
    activates a binned calculation with constant
    :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` memory.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_labels: Integer specifying the number of labels
        min_precision: float value specifying minimum precision threshold.
        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically
              calculated from all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly
              spaced from 0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins.
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor
              as bins for the calculation.

        ignore_index: Specifies a target value that is ignored and does not contribute to the
            metric calculation.
        validate_args: bool indicating if input arguments and tensors should be validated for
            correctness. Set to ``False`` for faster computations.

    Returns:
        (tuple): a tuple of 2 tensors containing:

        - recall: an 1d tensor of size (n_labels, ) with the maximum recall for the given
          precision level per label
        - thresholds: an 1d tensor of size (n_labels, ) with the corresponding threshold
          level per label

    Example:
        >>> from torchmetrics.functional.classification import multilabel_recall_at_fixed_precision
        >>> preds = torch.tensor([[0.75, 0.05, 0.35],
        ...                       [0.45, 0.75, 0.05],
        ...                       [0.05, 0.55, 0.75],
        ...                       [0.05, 0.65, 0.05]])
        >>> target = torch.tensor([[1, 0, 1],
        ...                        [0, 0, 0],
        ...                        [0, 1, 1],
        ...                        [1, 1, 1]])
        >>> multilabel_recall_at_fixed_precision(preds, target, num_labels=3, min_precision=0.5, thresholds=None)
        (tensor([1., 1., 1.]), tensor([0.0500, 0.5500, 0.0500]))
        >>> multilabel_recall_at_fixed_precision(preds, target, num_labels=3, min_precision=0.5, thresholds=5)
        (tensor([1., 1., 1.]), tensor([0.0000, 0.5000, 0.0000]))
    """
    if validate_args:
        _multilabel_recall_at_fixed_precision_arg_validation(num_labels, min_precision, thresholds, ignore_index)
        _multilabel_precision_recall_curve_tensor_validation(preds, target, num_labels, ignore_index)
    preds, target, thresholds = _multilabel_precision_recall_curve_format(
        preds, target, num_labels, thresholds, ignore_index
    )
    curve_state = _multilabel_precision_recall_curve_update(preds, target, num_labels, thresholds)
    return _multilabel_recall_at_fixed_precision_arg_compute(
        curve_state, num_labels, thresholds, ignore_index, min_precision
    )
365
+
366
+
367
def recall_at_fixed_precision(
    preds: Tensor,
    target: Tensor,
    task: Literal["binary", "multiclass", "multilabel"],
    min_precision: float,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    num_classes: Optional[int] = None,
    num_labels: Optional[int] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Union[Tensor, Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
    r"""Compute the highest recall reachable at a minimum precision level.

    This function is a simple wrapper that dispatches on the ``task`` argument to
    :func:`binary_recall_at_fixed_precision`, :func:`multiclass_recall_at_fixed_precision` or
    :func:`multilabel_recall_at_fixed_precision`. See the documentation of those functions for
    the specific details of each argument influence and examples.

    Raises:
        ValueError: If ``task`` is not one of ``'binary'``, ``'multiclass'`` or ``'multilabel'``,
            or if the ``num_classes`` / ``num_labels`` argument required by the chosen task is
            not an integer.
    """
    if task == "binary":
        return binary_recall_at_fixed_precision(preds, target, min_precision, thresholds, ignore_index, validate_args)
    if task == "multiclass":
        # Explicit checks instead of `assert`: asserts are stripped under `python -O`, and a
        # missing/invalid argument should always fail loudly with a helpful message.
        if not isinstance(num_classes, int):
            raise ValueError(f"Expected argument `num_classes` to be an integer, but got {num_classes}")
        return multiclass_recall_at_fixed_precision(
            preds, target, num_classes, min_precision, thresholds, ignore_index, validate_args
        )
    if task == "multilabel":
        if not isinstance(num_labels, int):
            raise ValueError(f"Expected argument `num_labels` to be an integer, but got {num_labels}")
        return multilabel_recall_at_fixed_precision(
            preds, target, num_labels, min_precision, thresholds, ignore_index, validate_args
        )
    raise ValueError(
        f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
    )
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/roc.py ADDED
@@ -0,0 +1,496 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import List, Optional, Tuple, Union
15
+
16
+ import torch
17
+ from torch import Tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.functional.classification.precision_recall_curve import (
21
+ _binary_clf_curve,
22
+ _binary_precision_recall_curve_arg_validation,
23
+ _binary_precision_recall_curve_format,
24
+ _binary_precision_recall_curve_tensor_validation,
25
+ _binary_precision_recall_curve_update,
26
+ _multiclass_precision_recall_curve_arg_validation,
27
+ _multiclass_precision_recall_curve_format,
28
+ _multiclass_precision_recall_curve_tensor_validation,
29
+ _multiclass_precision_recall_curve_update,
30
+ _multilabel_precision_recall_curve_arg_validation,
31
+ _multilabel_precision_recall_curve_format,
32
+ _multilabel_precision_recall_curve_tensor_validation,
33
+ _multilabel_precision_recall_curve_update,
34
+ )
35
+ from torchmetrics.utilities import rank_zero_warn
36
+ from torchmetrics.utilities.compute import _safe_divide
37
+
38
+
39
def _binary_roc_compute(
    state: Union[Tensor, Tuple[Tensor, Tensor]],
    thresholds: Optional[Tensor],
    pos_label: int = 1,
) -> Tuple[Tensor, Tensor, Tensor]:
    """Compute (fpr, tpr, thresholds) of a binary ROC curve from the accumulated state.

    ``state`` is either the stacked per-threshold confusion matrices (binned mode, when
    ``thresholds`` is given) or the raw ``(preds, target)`` pair (non-binned mode).
    """
    if isinstance(state, Tensor) and thresholds is not None:
        # Binned mode: rates come straight from the confusion-matrix counts.
        tps, fps = state[:, 1, 1], state[:, 0, 1]
        fns, tns = state[:, 1, 0], state[:, 0, 0]
        # Flip so the rates are monotonically increasing as thresholds decrease.
        tpr = _safe_divide(tps, tps + fns).flip(0)
        fpr = _safe_divide(fps, fps + tns).flip(0)
        return fpr, tpr, thresholds.flip(0)

    # Non-binned mode: derive the cumulative counts from the raw predictions.
    fps, tps, thresholds = _binary_clf_curve(preds=state[0], target=state[1], pos_label=pos_label)
    # Prepend an extra threshold position so the curve always starts at (0, 0).
    tps = torch.cat([torch.zeros(1, dtype=tps.dtype, device=tps.device), tps])
    fps = torch.cat([torch.zeros(1, dtype=fps.dtype, device=fps.device), fps])
    thresholds = torch.cat([torch.ones(1, dtype=thresholds.dtype, device=thresholds.device), thresholds])

    if fps[-1] <= 0:
        # No negatives at all: the denominator would be zero, so return zeros and warn.
        rank_zero_warn(
            "No negative samples in targets, false positive value should be meaningless."
            " Returning zero tensor in false positive score",
            UserWarning,
        )
        fpr = torch.zeros_like(thresholds)
    else:
        fpr = fps / fps[-1]

    if tps[-1] <= 0:
        # No positives at all: same degenerate case for the true positive rate.
        rank_zero_warn(
            "No positive samples in targets, true positive value should be meaningless."
            " Returning zero tensor in true positive score",
            UserWarning,
        )
        tpr = torch.zeros_like(thresholds)
    else:
        tpr = tps / tps[-1]

    return fpr, tpr, thresholds
80
+
81
+
82
def binary_roc(
    preds: Tensor,
    target: Tensor,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tuple[Tensor, Tensor, Tensor]:
    r"""Compute the Receiver Operating Characteristic (ROC) for binary tasks.

    The curve consists of pairs of true positive rate (TPR) and false positive rate (FPR)
    values evaluated at different thresholds, exposing the tradeoff between the two.

    Accepts the following input tensors:

    - ``preds`` (float tensor): ``(N, ...)``. Probabilities or logits (sigmoid is auto-applied
      per element if values fall outside the [0,1] range).
    - ``target`` (int tensor): ``(N, ...)``. Ground truth labels in {0,1} (except if
      `ignore_index` is specified). The value 1 always encodes the positive class.

    Additional dimension ``...`` will be flattened into the batch dimension.

    Setting ``thresholds`` to `None` uses an exact, non-binned calculation with
    :math:`\mathcal{O}(n_{samples})` memory; an int / list / 1d tensor of thresholds instead
    activates a binned calculation with constant :math:`\mathcal{O}(n_{thresholds})` memory.

    Note that the outputted thresholds are in reversed order, matching the fpr and tpr values,
    which are sorted in reversed order during their calculation to be monotonically increasing.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically
              calculated from all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly
              spaced from 0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins.
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor
              as bins for the calculation.

        ignore_index: Specifies a target value that is ignored and does not contribute to the
            metric calculation.
        validate_args: bool indicating if input arguments and tensors should be validated for
            correctness. Set to ``False`` for faster computations.

    Returns:
        (tuple): a tuple of 3 tensors containing:

        - fpr: an 1d tensor of size (n_thresholds+1, ) with false positive rate values
        - tpr: an 1d tensor of size (n_thresholds+1, ) with true positive rate values
        - thresholds: an 1d tensor of size (n_thresholds, ) with decreasing threshold values

    Example:
        >>> from torchmetrics.functional.classification import binary_roc
        >>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
        >>> target = torch.tensor([0, 1, 1, 0])
        >>> binary_roc(preds, target, thresholds=None)  # doctest: +NORMALIZE_WHITESPACE
        (tensor([0.0000, 0.5000, 0.5000, 0.5000, 1.0000]),
         tensor([0.0000, 0.0000, 0.5000, 1.0000, 1.0000]),
         tensor([1.0000, 0.8000, 0.7000, 0.5000, 0.0000]))
        >>> binary_roc(preds, target, thresholds=5)  # doctest: +NORMALIZE_WHITESPACE
        (tensor([0.0000, 0.5000, 0.5000, 0.5000, 1.0000]),
         tensor([0., 0., 1., 1., 1.]),
         tensor([1.0000, 0.7500, 0.5000, 0.2500, 0.0000]))
    """
    if validate_args:
        _binary_precision_recall_curve_arg_validation(thresholds, ignore_index)
        _binary_precision_recall_curve_tensor_validation(preds, target, ignore_index)
    preds, target, thresholds = _binary_precision_recall_curve_format(preds, target, thresholds, ignore_index)
    curve_state = _binary_precision_recall_curve_update(preds, target, thresholds)
    return _binary_roc_compute(curve_state, thresholds)
155
+
156
+
157
def _multiclass_roc_compute(
    state: Union[Tensor, Tuple[Tensor, Tensor]],
    num_classes: int,
    thresholds: Optional[Tensor],
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
    """Compute the multiclass ROC curves from the accumulated state, one-vs-rest per class."""
    if isinstance(state, Tensor) and thresholds is not None:
        # Binned mode: rates come straight from the per-threshold, per-class confusion counts.
        tps, fps = state[:, :, 1, 1], state[:, :, 0, 1]
        fns, tns = state[:, :, 1, 0], state[:, :, 0, 0]
        tpr = _safe_divide(tps, tps + fns).flip(0).T
        fpr = _safe_divide(fps, fps + tns).flip(0).T
        return fpr, tpr, thresholds.flip(0)
    # Non-binned mode: run one binary ROC computation per class (one-vs-rest).
    fpr_list: List[Tensor] = []
    tpr_list: List[Tensor] = []
    threshold_list: List[Tensor] = []
    for class_idx in range(num_classes):
        class_fpr, class_tpr, class_thresholds = _binary_roc_compute(
            [state[0][:, class_idx], state[1]], thresholds=None, pos_label=class_idx
        )
        fpr_list.append(class_fpr)
        tpr_list.append(class_tpr)
        threshold_list.append(class_thresholds)
    return fpr_list, tpr_list, threshold_list
178
+
179
+
180
def multiclass_roc(
    preds: Tensor,
    target: Tensor,
    num_classes: int,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
    r"""Compute the Receiver Operating Characteristic (ROC) for multiclass tasks.

    The curve consists of pairs of true positive rate (TPR) and false positive rate (FPR)
    values evaluated at different thresholds, exposing the tradeoff between the two.

    Accepts the following input tensors:

    - ``preds`` (float tensor): ``(N, C, ...)``. Probabilities or logits (softmax is
      auto-applied per sample if values fall outside the [0,1] range).
    - ``target`` (int tensor): ``(N, ...)``. Ground truth labels in the [0, n_classes-1]
      range (except if `ignore_index` is specified).

    Additional dimension ``...`` will be flattened into the batch dimension.

    Setting ``thresholds`` to `None` uses an exact, non-binned calculation with
    :math:`\mathcal{O}(n_{samples})` memory; an int / list / 1d tensor of thresholds instead
    activates a binned calculation with constant
    :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` memory.

    Note that the outputted thresholds are in reversed order, matching the fpr and tpr values,
    which are sorted in reversed order during their calculation to be monotonically increasing.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_classes: Integer specifying the number of classes
        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically
              calculated from all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly
              spaced from 0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins.
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor
              as bins for the calculation.

        ignore_index: Specifies a target value that is ignored and does not contribute to the
            metric calculation.
        validate_args: bool indicating if input arguments and tensors should be validated for
            correctness. Set to ``False`` for faster computations.

    Returns:
        (tuple): a tuple of either 3 tensors or 3 lists containing

        - fpr: if `thresholds=None` a list for each class is returned with an 1d tensor of
          size (n_thresholds+1, ) with false positive rate values (length may differ between
          classes). If `thresholds` is set to something else, then a single 2d tensor of size
          (n_classes, n_thresholds+1) with false positive rate values is returned.
        - tpr: if `thresholds=None` a list for each class is returned with an 1d tensor of
          size (n_thresholds+1, ) with true positive rate values (length may differ between
          classes). If `thresholds` is set to something else, then a single 2d tensor of size
          (n_classes, n_thresholds+1) with true positive rate values is returned.
        - thresholds: if `thresholds=None` a list for each class is returned with an 1d
          tensor of size (n_thresholds, ) with decreasing threshold values (length may differ
          between classes). If `threshold` is set to something else, then a single 1d tensor
          of size (n_thresholds, ) is returned with shared threshold values for all classes.

    Example:
        >>> from torchmetrics.functional.classification import multiclass_roc
        >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
        ...                       [0.05, 0.75, 0.05, 0.05, 0.05],
        ...                       [0.05, 0.05, 0.75, 0.05, 0.05],
        ...                       [0.05, 0.05, 0.05, 0.75, 0.05]])
        >>> target = torch.tensor([0, 1, 3, 2])
        >>> fpr, tpr, thresholds = multiclass_roc(
        ...    preds, target, num_classes=5, thresholds=None
        ... )
        >>> fpr  # doctest: +NORMALIZE_WHITESPACE
        [tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]),
         tensor([0.0000, 0.3333, 1.0000]), tensor([0., 1.])]
        >>> tpr
        [tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0., 0.])]
        >>> thresholds  # doctest: +NORMALIZE_WHITESPACE
        [tensor([1.0000, 0.7500, 0.0500]), tensor([1.0000, 0.7500, 0.0500]),
         tensor([1.0000, 0.7500, 0.0500]), tensor([1.0000, 0.7500, 0.0500]), tensor([1.0000, 0.0500])]
        >>> multiclass_roc(
        ...     preds, target, num_classes=5, thresholds=5
        ... )  # doctest: +NORMALIZE_WHITESPACE
        (tensor([[0.0000, 0.0000, 0.0000, 0.0000, 1.0000],
                 [0.0000, 0.0000, 0.0000, 0.0000, 1.0000],
                 [0.0000, 0.3333, 0.3333, 0.3333, 1.0000],
                 [0.0000, 0.3333, 0.3333, 0.3333, 1.0000],
                 [0.0000, 0.0000, 0.0000, 0.0000, 1.0000]]),
         tensor([[0., 1., 1., 1., 1.],
                 [0., 1., 1., 1., 1.],
                 [0., 0., 0., 0., 1.],
                 [0., 0., 0., 0., 1.],
                 [0., 0., 0., 0., 0.]]),
         tensor([1.0000, 0.7500, 0.5000, 0.2500, 0.0000]))
    """
    if validate_args:
        _multiclass_precision_recall_curve_arg_validation(num_classes, thresholds, ignore_index)
        _multiclass_precision_recall_curve_tensor_validation(preds, target, num_classes, ignore_index)
    preds, target, thresholds = _multiclass_precision_recall_curve_format(
        preds, target, num_classes, thresholds, ignore_index
    )
    curve_state = _multiclass_precision_recall_curve_update(preds, target, num_classes, thresholds)
    return _multiclass_roc_compute(curve_state, num_classes, thresholds)
283
+
284
+
285
def _multilabel_roc_compute(
    state: Union[Tensor, Tuple[Tensor, Tensor]],
    num_labels: int,
    thresholds: Optional[Tensor],
    ignore_index: Optional[int] = None,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
    """Compute the multilabel ROC curves from the accumulated state, one binary curve per label."""
    if isinstance(state, Tensor) and thresholds is not None:
        # Binned mode: rates come straight from the per-threshold, per-label confusion counts.
        tps, fps = state[:, :, 1, 1], state[:, :, 0, 1]
        fns, tns = state[:, :, 1, 0], state[:, :, 0, 0]
        tpr = _safe_divide(tps, tps + fns).flip(0).T
        fpr = _safe_divide(fps, fps + tns).flip(0).T
        return fpr, tpr, thresholds.flip(0)
    # Non-binned mode: run one binary ROC computation per label, dropping ignored positions.
    fpr_list: List[Tensor] = []
    tpr_list: List[Tensor] = []
    threshold_list: List[Tensor] = []
    for label_idx in range(num_labels):
        label_preds = state[0][:, label_idx]
        label_target = state[1][:, label_idx]
        if ignore_index is not None:
            keep = label_target != ignore_index
            label_preds = label_preds[keep]
            label_target = label_target[keep]
        label_fpr, label_tpr, label_thresholds = _binary_roc_compute(
            [label_preds, label_target], thresholds=None, pos_label=1
        )
        fpr_list.append(label_fpr)
        tpr_list.append(label_tpr)
        threshold_list.append(label_thresholds)
    return fpr_list, tpr_list, threshold_list
313
+
314
+
315
def multilabel_roc(
    preds: Tensor,
    target: Tensor,
    num_labels: int,
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
    r"""Computes the Receiver Operating Characteristic (ROC) for multilabel tasks. The curve consist of multiple
    pairs of true positive rate (TPR) and false positive rate (FPR) values evaluated at different thresholds, such
    that the tradeoff between the two values can be seen.

    Accepts the following input tensors:

    - ``preds`` (float tensor): ``(N, C, ...)``. Preds should be a tensor containing probabilities or logits for each
      observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
      sigmoid per element.
    - ``target`` (int tensor): ``(N, C, ...)``. Target should be a tensor containing ground truth labels, and therefore
      only contain {0,1} values (except if `ignore_index` is specified).

    Additional dimension ``...`` will be flattened into the batch dimension.

    The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
    that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
    non-binned version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
    argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
    size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).

    Note that outputted thresholds will be in reversed order to ensure that they corresponds to both fpr and tpr which
    are sorted in reversed order during their calculation, such that they are monotome increasing.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_labels: Integer specifing the number of labels
        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
              all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
              0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
              bins for the calculation.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        (tuple): a tuple of either 3 tensors or 3 lists containing

        - fpr: if `thresholds=None` a list for each label is returned with an 1d tensor of size (n_thresholds+1, )
          with false positive rate values (length may differ between labels). If `thresholds` is set to something else,
          then a single 2d tensor of size (n_labels, n_thresholds+1) with false positive rate values is returned.
        - tpr: if `thresholds=None` a list for each label is returned with an 1d tensor of size (n_thresholds+1, )
          with true positive rate values (length may differ between labels). If `thresholds` is set to something else,
          then a single 2d tensor of size (n_labels, n_thresholds+1) with true positive rate values is returned.
        - thresholds: if `thresholds=None` a list for each label is returned with an 1d tensor of size (n_thresholds, )
          with decreasing threshold values (length may differ between labels). If `threshold` is set to something else,
          then a single 1d tensor of size (n_thresholds, ) is returned with shared threshold values for all labels.

    Example:
        >>> from torchmetrics.functional.classification import multilabel_roc
        >>> preds = torch.tensor([[0.75, 0.05, 0.35],
        ...                       [0.45, 0.75, 0.05],
        ...                       [0.05, 0.55, 0.75],
        ...                       [0.05, 0.65, 0.05]])
        >>> target = torch.tensor([[1, 0, 1],
        ...                        [0, 0, 0],
        ...                        [0, 1, 1],
        ...                        [1, 1, 1]])
        >>> fpr, tpr, thresholds = multilabel_roc(
        ...    preds, target, num_labels=3, thresholds=None
        ... )
        >>> fpr  # doctest: +NORMALIZE_WHITESPACE
        [tensor([0.0000, 0.0000, 0.5000, 1.0000]),
         tensor([0.0000, 0.5000, 0.5000, 0.5000, 1.0000]),
         tensor([0., 0., 0., 1.])]
        >>> tpr  # doctest: +NORMALIZE_WHITESPACE
        [tensor([0.0000, 0.5000, 0.5000, 1.0000]),
         tensor([0.0000, 0.0000, 0.5000, 1.0000, 1.0000]),
         tensor([0.0000, 0.3333, 0.6667, 1.0000])]
        >>> thresholds  # doctest: +NORMALIZE_WHITESPACE
        [tensor([1.0000, 0.7500, 0.4500, 0.0500]),
         tensor([1.0000, 0.7500, 0.6500, 0.5500, 0.0500]),
         tensor([1.0000, 0.7500, 0.3500, 0.0500])]
        >>> multilabel_roc(
        ...     preds, target, num_labels=3, thresholds=5
        ... )  # doctest: +NORMALIZE_WHITESPACE
        (tensor([[0.0000, 0.0000, 0.0000, 0.5000, 1.0000],
                 [0.0000, 0.5000, 0.5000, 0.5000, 1.0000],
                 [0.0000, 0.0000, 0.0000, 0.0000, 1.0000]]),
         tensor([[0.0000, 0.5000, 0.5000, 0.5000, 1.0000],
                 [0.0000, 0.0000, 1.0000, 1.0000, 1.0000],
                 [0.0000, 0.3333, 0.3333, 0.6667, 1.0000]]),
         tensor([1.0000, 0.7500, 0.5000, 0.2500, 0.0000]))
    """
    if validate_args:
        _multilabel_precision_recall_curve_arg_validation(num_labels, thresholds, ignore_index)
        _multilabel_precision_recall_curve_tensor_validation(preds, target, num_labels, ignore_index)
    # Reuse the precision-recall-curve plumbing: formatting, threshold handling and state
    # accumulation are identical; only the final compute step differs for ROC.
    preds, target, thresholds = _multilabel_precision_recall_curve_format(
        preds, target, num_labels, thresholds, ignore_index
    )
    state = _multilabel_precision_recall_curve_update(preds, target, num_labels, thresholds)
    return _multilabel_roc_compute(state, num_labels, thresholds, ignore_index)
421
+
422
+
423
def roc(
    preds: Tensor,
    target: Tensor,
    task: Literal["binary", "multiclass", "multilabel"],
    thresholds: Optional[Union[int, List[float], Tensor]] = None,
    num_classes: Optional[int] = None,
    num_labels: Optional[int] = None,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]:
    r"""Computes the Receiver Operating Characteristic (ROC). The curve consist of multiple pairs of true positive
    rate (TPR) and false positive rate (FPR) values evaluated at different thresholds, such that the tradeoff
    between the two values can be seen.

    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
    :func:`binary_roc`, :func:`multiclass_roc` and :func:`multilabel_roc` for the specific details of each argument
    influence and examples.

    Legacy Example:
        >>> pred = torch.tensor([0.0, 1.0, 2.0, 3.0])
        >>> target = torch.tensor([0, 1, 1, 1])
        >>> fpr, tpr, thresholds = roc(pred, target, task='binary')
        >>> fpr
        tensor([0., 0., 0., 0., 1.])
        >>> tpr
        tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000])
        >>> thresholds
        tensor([1.0000, 0.9526, 0.8808, 0.7311, 0.5000])

        >>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05],
        ...                      [0.05, 0.75, 0.05, 0.05],
        ...                      [0.05, 0.05, 0.75, 0.05],
        ...                      [0.05, 0.05, 0.05, 0.75]])
        >>> target = torch.tensor([0, 1, 3, 2])
        >>> fpr, tpr, thresholds = roc(pred, target, task='multiclass', num_classes=4)
        >>> fpr
        [tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]), tensor([0.0000, 0.3333, 1.0000])]
        >>> tpr
        [tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.])]
        >>> thresholds
        [tensor([1.0000, 0.7500, 0.0500]),
         tensor([1.0000, 0.7500, 0.0500]),
         tensor([1.0000, 0.7500, 0.0500]),
         tensor([1.0000, 0.7500, 0.0500])]

        >>> pred = torch.tensor([[0.8191, 0.3680, 0.1138],
        ...                      [0.3584, 0.7576, 0.1183],
        ...                      [0.2286, 0.3468, 0.1338],
        ...                      [0.8603, 0.0745, 0.1837]])
        >>> target = torch.tensor([[1, 1, 0], [0, 1, 0], [0, 0, 0], [0, 1, 1]])
        >>> fpr, tpr, thresholds = roc(pred, target, task='multilabel', num_labels=3)
        >>> fpr
        [tensor([0.0000, 0.3333, 0.3333, 0.6667, 1.0000]),
         tensor([0., 0., 0., 1., 1.]),
         tensor([0.0000, 0.0000, 0.3333, 0.6667, 1.0000])]
        >>> tpr
        [tensor([0., 0., 1., 1., 1.]), tensor([0.0000, 0.3333, 0.6667, 0.6667, 1.0000]), tensor([0., 1., 1., 1., 1.])]
        >>> thresholds
        [tensor([1.0000, 0.8603, 0.8191, 0.3584, 0.2286]),
         tensor([1.0000, 0.7576, 0.3680, 0.3468, 0.0745]),
         tensor([1.0000, 0.1837, 0.1338, 0.1183, 0.1138])]
    """
    # Dispatch to the task-specific implementation; positional argument order below must
    # match each task function's signature.
    if task == "binary":
        return binary_roc(preds, target, thresholds, ignore_index, validate_args)
    if task == "multiclass":
        # `num_classes` is mandatory for the multiclass case.
        assert isinstance(num_classes, int)
        return multiclass_roc(preds, target, num_classes, thresholds, ignore_index, validate_args)
    if task == "multilabel":
        # `num_labels` is mandatory for the multilabel case.
        assert isinstance(num_labels, int)
        return multilabel_roc(preds, target, num_labels, thresholds, ignore_index, validate_args)
    raise ValueError(
        f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
    )
wemm/lib/python3.10/site-packages/torchmetrics/functional/classification/stat_scores.py ADDED
@@ -0,0 +1,1117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import List, Optional, Tuple, Union
15
+
16
+ import torch
17
+ from torch import Tensor, tensor
18
+ from typing_extensions import Literal
19
+
20
+ from torchmetrics.utilities.checks import _check_same_shape, _input_format_classification
21
+ from torchmetrics.utilities.data import _bincount, select_topk
22
+ from torchmetrics.utilities.enums import AverageMethod, DataType, MDMCAverageMethod
23
+
24
+
25
+ def _binary_stat_scores_arg_validation(
26
+ threshold: float = 0.5,
27
+ multidim_average: Literal["global", "samplewise"] = "global",
28
+ ignore_index: Optional[int] = None,
29
+ ) -> None:
30
+ """Validate non tensor input.
31
+
32
+ - ``threshold`` has to be a float in the [0,1] range
33
+ - ``multidim_average`` has to be either "global" or "samplewise"
34
+ - ``ignore_index`` has to be None or int
35
+ """
36
+ if not (isinstance(threshold, float) and (0 <= threshold <= 1)):
37
+ raise ValueError(f"Expected argument `threshold` to be a float in the [0,1] range, but got {threshold}.")
38
+ allowed_multidim_average = ("global", "samplewise")
39
+ if multidim_average not in allowed_multidim_average:
40
+ raise ValueError(
41
+ f"Expected argument `multidim_average` to be one of {allowed_multidim_average}, but got {multidim_average}"
42
+ )
43
+ if ignore_index is not None and not isinstance(ignore_index, int):
44
+ raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
45
+
46
+
47
def _binary_stat_scores_tensor_validation(
    preds: Tensor,
    target: Tensor,
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
) -> None:
    """Validate tensor input.

    - tensors have to be of same shape
    - all values in target tensor that are not ignored have to be in {0, 1}
    - if pred tensor is not floating point, then all values also have to be in {0, 1}
    - if ``multidim_average`` is set to ``samplewise`` preds tensor needs to be atleast 2 dimensional
    """
    # Check that they have same shape
    _check_same_shape(preds, target)

    # Check that target only contains [0,1] values or value in ignore_index
    unique_values = torch.unique(target)
    if ignore_index is None:
        check = torch.any((unique_values != 0) & (unique_values != 1))
    else:
        check = torch.any((unique_values != 0) & (unique_values != 1) & (unique_values != ignore_index))
    if check:
        # Fix: the original expression `{[0,1] + [] if ignore_index is None else [ignore_index]}`
        # parsed as `([0,1] + []) if ... else [ignore_index]` due to conditional-expression
        # precedence, so with an ignore_index set the message dropped the valid values [0, 1].
        raise RuntimeError(
            f"Detected the following values in `target`: {unique_values} but expected only"
            f" the following values {[0, 1] + ([] if ignore_index is None else [ignore_index])}."
        )

    # If preds is label tensor, also check that it only contains [0,1] values
    if not preds.is_floating_point():
        unique_values = torch.unique(preds)
        if torch.any((unique_values != 0) & (unique_values != 1)):
            raise RuntimeError(
                f"Detected the following values in `preds`: {unique_values} but expected only"
                " the following values [0,1] since `preds` is a label tensor."
            )

    if multidim_average != "global" and preds.ndim < 2:
        raise ValueError("Expected input to be atleast 2D when multidim_average is set to `samplewise`")
86
+
87
+
88
+ def _binary_stat_scores_format(
89
+ preds: Tensor,
90
+ target: Tensor,
91
+ threshold: float = 0.5,
92
+ ignore_index: Optional[int] = None,
93
+ ) -> Tuple[Tensor, Tensor]:
94
+ """Convert all input to label format.
95
+
96
+ - If preds tensor is floating point, applies sigmoid if pred tensor not in [0,1] range
97
+ - If preds tensor is floating point, thresholds afterwards
98
+ - Mask all datapoints that should be ignored with negative values
99
+ """
100
+ if preds.is_floating_point():
101
+ if not torch.all((0 <= preds) * (preds <= 1)):
102
+ # preds is logits, convert with sigmoid
103
+ preds = preds.sigmoid()
104
+ preds = preds > threshold
105
+
106
+ preds = preds.reshape(preds.shape[0], -1)
107
+ target = target.reshape(target.shape[0], -1)
108
+
109
+ if ignore_index is not None:
110
+ idx = target == ignore_index
111
+ target = target.clone()
112
+ target[idx] = -1
113
+
114
+ return preds, target
115
+
116
+
117
+ def _binary_stat_scores_update(
118
+ preds: Tensor,
119
+ target: Tensor,
120
+ multidim_average: Literal["global", "samplewise"] = "global",
121
+ ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
122
+ """Computes the statistics."""
123
+ sum_dim = [0, 1] if multidim_average == "global" else 1
124
+ tp = ((target == preds) & (target == 1)).sum(sum_dim).squeeze()
125
+ fn = ((target != preds) & (target == 1)).sum(sum_dim).squeeze()
126
+ fp = ((target != preds) & (target == 0)).sum(sum_dim).squeeze()
127
+ tn = ((target == preds) & (target == 0)).sum(sum_dim).squeeze()
128
+ return tp, fp, tn, fn
129
+
130
+
131
+ def _binary_stat_scores_compute(
132
+ tp: Tensor, fp: Tensor, tn: Tensor, fn: Tensor, multidim_average: Literal["global", "samplewise"] = "global"
133
+ ) -> Tensor:
134
+ """Stack statistics and compute support also."""
135
+ return torch.stack([tp, fp, tn, fn, tp + fn], dim=0 if multidim_average == "global" else 1).squeeze()
136
+
137
+
138
def binary_stat_scores(
    preds: Tensor,
    target: Tensor,
    threshold: float = 0.5,
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes the number of true positives, false positives, true negatives, false negatives and the support for
    binary tasks. Related to `Type I and Type II errors`_.

    Accepts the following input tensors:

    - ``preds`` (int or float tensor): ``(N, ...)``. If preds is a floating point tensor with values outside
      [0,1] range we consider the input to be logits and will auto apply sigmoid per element. Addtionally,
      we convert to int tensor with thresholding using the value in ``threshold``.
    - ``target`` (int tensor): ``(N, ...)``

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        threshold: Threshold for transforming probability to binary {0,1} predictions
        multidim_average:
            Defines how additionally dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flatted along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        The metric returns a tensor of shape ``(..., 5)``, where the last dimension corresponds
        to ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals ``tp + fn``). The shape
        depends on the ``multidim_average`` parameter:

        - If ``multidim_average`` is set to ``global``, the shape will be ``(5,)``
        - If ``multidim_average`` is set to ``samplewise``, the shape will be ``(N, 5)``

    Example (preds is int tensor):
        >>> from torchmetrics.functional.classification import binary_stat_scores
        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
        >>> preds = torch.tensor([0, 0, 1, 1, 0, 1])
        >>> binary_stat_scores(preds, target)
        tensor([2, 1, 2, 1, 3])

    Example (preds is float tensor):
        >>> from torchmetrics.functional.classification import binary_stat_scores
        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
        >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
        >>> binary_stat_scores(preds, target)
        tensor([2, 1, 2, 1, 3])

    Example (multidim tensors):
        >>> from torchmetrics.functional.classification import binary_stat_scores
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> binary_stat_scores(preds, target, multidim_average='samplewise')
        tensor([[2, 3, 0, 1, 3],
                [0, 2, 1, 3, 3]])
    """
    if validate_args:
        _binary_stat_scores_arg_validation(threshold, multidim_average, ignore_index)
        _binary_stat_scores_tensor_validation(preds, target, multidim_average, ignore_index)
    # Pipeline: format (sigmoid/threshold/flatten/mask), count tp/fp/tn/fn, then stack
    # into the final [tp, fp, tn, fn, sup] layout.
    preds, target = _binary_stat_scores_format(preds, target, threshold, ignore_index)
    tp, fp, tn, fn = _binary_stat_scores_update(preds, target, multidim_average)
    return _binary_stat_scores_compute(tp, fp, tn, fn, multidim_average)
213
+
214
+
215
+ def _multiclass_stat_scores_arg_validation(
216
+ num_classes: int,
217
+ top_k: int = 1,
218
+ average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
219
+ multidim_average: Literal["global", "samplewise"] = "global",
220
+ ignore_index: Optional[int] = None,
221
+ ) -> None:
222
+ """Validate non tensor input.
223
+
224
+ - ``num_classes`` has to be a int larger than 1
225
+ - ``top_k`` has to be an int larger than 0 but no larger than number of classes
226
+ - ``average`` has to be "micro" | "macro" | "weighted" | "none"
227
+ - ``multidim_average`` has to be either "global" or "samplewise"
228
+ - ``ignore_index`` has to be None or int
229
+ """
230
+ if not isinstance(num_classes, int) or num_classes < 2:
231
+ raise ValueError(f"Expected argument `num_classes` to be an integer larger than 1, but got {num_classes}")
232
+ if not isinstance(top_k, int) and top_k < 1:
233
+ raise ValueError(f"Expected argument `top_k` to be an integer larger than or equal to 1, but got {top_k}")
234
+ if top_k > num_classes:
235
+ raise ValueError(
236
+ f"Expected argument `top_k` to be smaller or equal to `num_classes` but got {top_k} and {num_classes}"
237
+ )
238
+ allowed_average = ("micro", "macro", "weighted", "none", None)
239
+ if average not in allowed_average:
240
+ raise ValueError(f"Expected argument `average` to be one of {allowed_average}, but got {average}")
241
+ allowed_multidim_average = ("global", "samplewise")
242
+ if multidim_average not in allowed_multidim_average:
243
+ raise ValueError(
244
+ f"Expected argument `multidim_average` to be one of {allowed_multidim_average}, but got {multidim_average}"
245
+ )
246
+ if ignore_index is not None and not isinstance(ignore_index, int):
247
+ raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
248
+
249
+
250
+ def _multiclass_stat_scores_tensor_validation(
251
+ preds: Tensor,
252
+ target: Tensor,
253
+ num_classes: int,
254
+ multidim_average: Literal["global", "samplewise"] = "global",
255
+ ignore_index: Optional[int] = None,
256
+ ) -> None:
257
+ """Validate tensor input.
258
+
259
+ - if target has one more dimension than preds, then all dimensions except for preds.shape[1] should match
260
+ exactly. preds.shape[1] should have size equal to number of classes
261
+ - if preds and target have same number of dims, then all dimensions should match
262
+ - if ``multidim_average`` is set to ``samplewise`` preds tensor needs to be atleast 2 dimensional in the
263
+ int case and 3 dimensional in the float case
264
+ - all values in target tensor that are not ignored have to be {0, ..., num_classes - 1}
265
+ - if pred tensor is not floating point, then all values also have to be in {0, ..., num_classes - 1}
266
+ """
267
+ if preds.ndim == target.ndim + 1:
268
+ if not preds.is_floating_point():
269
+ raise ValueError("If `preds` have one dimension more than `target`, `preds` should be a float tensor.")
270
+ if preds.shape[1] != num_classes:
271
+ raise ValueError(
272
+ "If `preds` have one dimension more than `target`, `preds.shape[1]` should be"
273
+ " equal to number of classes."
274
+ )
275
+ if preds.shape[2:] != target.shape[1:]:
276
+ raise ValueError(
277
+ "If `preds` have one dimension more than `target`, the shape of `preds` should be"
278
+ " (N, C, ...), and the shape of `target` should be (N, ...)."
279
+ )
280
+ if multidim_average != "global" and preds.ndim < 3:
281
+ raise ValueError(
282
+ "If `preds` have one dimension more than `target`, the shape of `preds` should "
283
+ " atleast 3D when multidim_average is set to `samplewise`"
284
+ )
285
+
286
+ elif preds.ndim == target.ndim:
287
+ if preds.shape != target.shape:
288
+ raise ValueError(
289
+ "The `preds` and `target` should have the same shape,",
290
+ f" got `preds` with shape={preds.shape} and `target` with shape={target.shape}.",
291
+ )
292
+ if multidim_average != "global" and preds.ndim < 2:
293
+ raise ValueError(
294
+ "When `preds` and `target` have the same shape, the shape of `preds` should "
295
+ " atleast 2D when multidim_average is set to `samplewise`"
296
+ )
297
+ else:
298
+ raise ValueError(
299
+ "Either `preds` and `target` both should have the (same) shape (N, ...), or `target` should be (N, ...)"
300
+ " and `preds` should be (N, C, ...)."
301
+ )
302
+
303
+ num_unique_values = len(torch.unique(target))
304
+ if ignore_index is None:
305
+ check = num_unique_values > num_classes
306
+ else:
307
+ check = num_unique_values > num_classes + 1
308
+ if check:
309
+ raise RuntimeError(
310
+ "Detected more unique values in `target` than `num_classes`. Expected only "
311
+ f"{num_classes if ignore_index is None else num_classes + 1} but found"
312
+ f"{num_unique_values} in `target`."
313
+ )
314
+
315
+ if not preds.is_floating_point():
316
+ unique_values = torch.unique(preds)
317
+ if len(unique_values) > num_classes:
318
+ raise RuntimeError(
319
+ "Detected more unique values in `preds` than `num_classes`. Expected only "
320
+ f"{num_classes} but found {len(unique_values)} in `preds`."
321
+ )
322
+
323
+
324
+ def _multiclass_stat_scores_format(
325
+ preds: Tensor,
326
+ target: Tensor,
327
+ top_k: int = 1,
328
+ ) -> Tuple[Tensor, Tensor]:
329
+ """Convert all input to label format except if ``top_k`` is not 1.
330
+
331
+ - Applies argmax if preds have one more dimension than target
332
+ - Flattens additional dimensions
333
+ """
334
+ # Apply argmax if we have one more dimension
335
+ if preds.ndim == target.ndim + 1 and top_k == 1:
336
+ preds = preds.argmax(dim=1)
337
+ if top_k != 1:
338
+ preds = preds.reshape(*preds.shape[:2], -1)
339
+ else:
340
+ preds = preds.reshape(preds.shape[0], -1)
341
+ target = target.reshape(target.shape[0], -1)
342
+ return preds, target
343
+
344
+
345
def _multiclass_stat_scores_update(
    preds: Tensor,
    target: Tensor,
    num_classes: int,
    top_k: int = 1,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
    """Computes the statistics.

    - If ``multidim_average`` is equal to samplewise or ``top_k`` is not 1, we transform both preds and
      target into one hot format.
    - Else we calculate statistics by first calculating the confusion matrix and afterwards deriving the
      statistics from that
    - Remove all datapoints that should be ignored. Depending on if ``ignore_index`` is in the set of labels
      or outside we have do use different augmentation stategies when one hot encoding.
    """
    if multidim_average == "samplewise" or top_k != 1:
        # One-hot path. `ignore_in` is True when the ignored value collides with a valid
        # class label (0..num_classes-1), None when there is nothing to ignore.
        ignore_in = 0 <= ignore_index <= num_classes - 1 if ignore_index is not None else None
        if ignore_index is not None and not ignore_in:
            # Out-of-range ignore value: remap ignored positions onto a temporary extra
            # class `num_classes` (on clones, so caller tensors are untouched); the extra
            # column is stripped again after one-hot encoding below.
            preds = preds.clone()
            target = target.clone()
            idx = target == ignore_index
            target[idx] = num_classes
            # For (N, C, ...) float preds the mask has to be broadcast over the class dim.
            idx = idx.unsqueeze(1).repeat(1, num_classes, 1) if preds.ndim > target.ndim else idx
            preds[idx] = num_classes

        if top_k > 1:
            # Project helper `select_topk` yields a one-hot-style encoding of the top-k
            # predictions along the class dim; move the class dim last to align with target_oh.
            preds_oh = torch.movedim(select_topk(preds, topk=top_k, dim=1), 1, -1)
        else:
            preds_oh = torch.nn.functional.one_hot(
                preds, num_classes + 1 if ignore_index is not None and not ignore_in else num_classes
            )
        target_oh = torch.nn.functional.one_hot(
            target, num_classes + 1 if ignore_index is not None and not ignore_in else num_classes
        )
        if ignore_index is not None:
            if 0 <= ignore_index <= num_classes - 1:
                # In-range ignore value: set those rows to -1 so they match neither the
                # "== 1" nor the "== 0" comparisons below and thus count toward nothing.
                target_oh[target == ignore_index, :] = -1
            else:
                # Strip the temporary extra class and neutralize the remapped rows.
                preds_oh = preds_oh[..., :-1] if top_k == 1 else preds_oh
                target_oh = target_oh[..., :-1]
                target_oh[target == num_classes, :] = -1
        sum_dim = [0, 1] if multidim_average == "global" else [1]
        tp = ((target_oh == preds_oh) & (target_oh == 1)).sum(sum_dim)
        fn = ((target_oh != preds_oh) & (target_oh == 1)).sum(sum_dim)
        fp = ((target_oh != preds_oh) & (target_oh == 0)).sum(sum_dim)
        tn = ((target_oh == preds_oh) & (target_oh == 0)).sum(sum_dim)
    elif average == "micro":
        # Micro statistics only need global correct/incorrect counts, no per-class split.
        preds = preds.flatten()
        target = target.flatten()
        if ignore_index is not None:
            idx = target != ignore_index
            preds = preds[idx]
            target = target[idx]
        tp = (preds == target).sum()
        fp = (preds != target).sum()
        fn = (preds != target).sum()
        tn = num_classes * preds.numel() - (fp + fn + tp)
    else:
        # Per-class statistics derived from a num_classes x num_classes confusion matrix,
        # built via a flat bincount over (target * num_classes + pred) pair indices.
        preds = preds.flatten()
        target = target.flatten()
        if ignore_index is not None:
            idx = target != ignore_index
            preds = preds[idx]
            target = target[idx]
        unique_mapping = target.to(torch.long) * num_classes + preds.to(torch.long)
        bins = _bincount(unique_mapping, minlength=num_classes**2)
        confmat = bins.reshape(num_classes, num_classes)
        tp = confmat.diag()
        fp = confmat.sum(0) - tp
        fn = confmat.sum(1) - tp
        tn = confmat.sum() - (fp + fn + tp)
    return tp, fp, tn, fn
420
+
421
+
422
+ def _multiclass_stat_scores_compute(
423
+ tp: Tensor,
424
+ fp: Tensor,
425
+ tn: Tensor,
426
+ fn: Tensor,
427
+ average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
428
+ multidim_average: Literal["global", "samplewise"] = "global",
429
+ ) -> Tensor:
430
+ """Stack statistics and compute support also.
431
+
432
+ Applies average strategy afterwards.
433
+ """
434
+ res = torch.stack([tp, fp, tn, fn, tp + fn], dim=-1)
435
+ sum_dim = 0 if multidim_average == "global" else 1
436
+ if average == "micro":
437
+ return res.sum(sum_dim) if res.ndim > 1 else res
438
+ if average == "macro":
439
+ return res.float().mean(sum_dim)
440
+ elif average == "weighted":
441
+ weight = tp + fn
442
+ if multidim_average == "global":
443
+ return (res * (weight / weight.sum()).reshape(*weight.shape, 1)).sum(sum_dim)
444
+ else:
445
+ return (res * (weight / weight.sum(-1, keepdim=True)).reshape(*weight.shape, 1)).sum(sum_dim)
446
+ elif average is None or average == "none":
447
+ return res
448
+
449
+
450
def multiclass_stat_scores(
    preds: Tensor,
    target: Tensor,
    num_classes: int,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
    top_k: int = 1,
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes the number of true positives, false positives, true negatives, false negatives and the support for
    multiclass tasks. Related to `Type I and Type II errors`_.

    ``preds`` is either an int tensor of shape ``(N, ...)`` with predicted labels, or a float tensor of shape
    ``(N, C, ...)`` with probabilities/logits that are converted to labels along the class dimension (respecting
    ``top_k``). ``target`` is an int tensor of shape ``(N, ...)``.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_classes: Integer specifing the number of classes
        average: Reduction over labels: ``"micro"`` (sum all statistics), ``"macro"`` (unweighted mean),
            ``"weighted"`` (support-weighted mean) or ``"none"``/``None`` (no reduction).
        top_k: Number of highest probability or logit score predictions considered to find the correct label.
            Only works when ``preds`` contain probabilities/logits.
        multidim_average: How extra dimensions ``...`` are handled: ``"global"`` flattens them into the batch
            dimension, ``"samplewise"`` computes the statistics per sample over them.
        ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        A tensor whose last dimension holds ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals
        ``tp + fn``). Depending on ``average`` and ``multidim_average`` the shape is ``(5,)``, ``(C, 5)``,
        ``(N, 5)`` or ``(N, C, 5)``.

    Example:
        >>> from torchmetrics.functional.classification import multiclass_stat_scores
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([2, 1, 0, 1])
        >>> multiclass_stat_scores(preds, target, num_classes=3, average='micro')
        tensor([3, 1, 7, 1, 4])
        >>> multiclass_stat_scores(preds, target, num_classes=3, average=None)
        tensor([[1, 0, 2, 1, 2],
                [1, 1, 2, 0, 1],
                [1, 0, 3, 0, 1]])
    """
    if validate_args:
        _multiclass_stat_scores_arg_validation(num_classes, top_k, average, multidim_average, ignore_index)
        _multiclass_stat_scores_tensor_validation(preds, target, num_classes, multidim_average, ignore_index)
    # canonicalize inputs to label tensors, then count and finally reduce
    preds, target = _multiclass_stat_scores_format(preds, target, top_k)
    stats = _multiclass_stat_scores_update(preds, target, num_classes, top_k, average, multidim_average, ignore_index)
    return _multiclass_stat_scores_compute(*stats, average, multidim_average)
561
+
562
+
563
+ def _multilabel_stat_scores_arg_validation(
564
+ num_labels: int,
565
+ threshold: float = 0.5,
566
+ average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
567
+ multidim_average: Literal["global", "samplewise"] = "global",
568
+ ignore_index: Optional[int] = None,
569
+ ) -> None:
570
+ """Validate non tensor input.
571
+
572
+ - ``num_labels`` should be an int larger than 1
573
+ - ``threshold`` has to be a float in the [0,1] range
574
+ - ``average`` has to be "micro" | "macro" | "weighted" | "none"
575
+ - ``multidim_average`` has to be either "global" or "samplewise"
576
+ - ``ignore_index`` has to be None or int
577
+ """
578
+ if not isinstance(num_labels, int) or num_labels < 2:
579
+ raise ValueError(f"Expected argument `num_labels` to be an integer larger than 1, but got {num_labels}")
580
+ if not (isinstance(threshold, float) and (0 <= threshold <= 1)):
581
+ raise ValueError(f"Expected argument `threshold` to be a float, but got {threshold}.")
582
+ allowed_average = ("micro", "macro", "weighted", "none", None)
583
+ if average not in allowed_average:
584
+ raise ValueError(f"Expected argument `average` to be one of {allowed_average}, but got {average}")
585
+ allowed_multidim_average = ("global", "samplewise")
586
+ if multidim_average not in allowed_multidim_average:
587
+ raise ValueError(
588
+ f"Expected argument `multidim_average` to be one of {allowed_multidim_average}, but got {multidim_average}"
589
+ )
590
+ if ignore_index is not None and not isinstance(ignore_index, int):
591
+ raise ValueError(f"Expected argument `ignore_index` to either be `None` or an integer, but got {ignore_index}")
592
+
593
+
594
def _multilabel_stat_scores_tensor_validation(
    preds: Tensor,
    target: Tensor,
    num_labels: int,
    multidim_average: str,
    ignore_index: Optional[int] = None,
) -> None:
    """Validate tensor input.

    - tensors have to be of same shape
    - the second dimension of both tensors need to be equal to the number of labels
    - all values in target tensor that are not ignored have to be in {0, 1}
    - if pred tensor is not floating point, then all values also have to be in {0, 1}
    - if ``multidim_average`` is set to ``samplewise`` preds tensor needs to be atleast 3 dimensional
    """
    # Check that they have same shape
    _check_same_shape(preds, target)

    if preds.shape[1] != num_labels:
        raise ValueError(
            "Expected both `target.shape[1]` and `preds.shape[1]` to be equal to the number of labels"
            f" but got {preds.shape[1]} and expected {num_labels}"
        )

    # Check that target only contains [0,1] values or value in ignore_index
    unique_values = torch.unique(target)
    if ignore_index is None:
        check = torch.any((unique_values != 0) & (unique_values != 1))
    else:
        check = torch.any((unique_values != 0) & (unique_values != 1) & (unique_values != ignore_index))
    if check:
        # NOTE: the parentheses around the conditional expression are required. Without them the
        # ternary binds looser than `+`, so the message dropped the allowed values `[0, 1]`
        # whenever `ignore_index` was set.
        raise RuntimeError(
            f"Detected the following values in `target`: {unique_values} but expected only"
            f" the following values {[0, 1] + ([] if ignore_index is None else [ignore_index])}."
        )

    # If preds is label tensor, also check that it only contains [0,1] values
    if not preds.is_floating_point():
        unique_values = torch.unique(preds)
        if torch.any((unique_values != 0) & (unique_values != 1)):
            raise RuntimeError(
                f"Detected the following values in `preds`: {unique_values} but expected only"
                " the following values [0,1] since preds is a label tensor."
            )

    if multidim_average != "global" and preds.ndim < 3:
        raise ValueError("Expected input to be atleast 3D when multidim_average is set to `samplewise`")
641
+
642
+
643
+ def _multilabel_stat_scores_format(
644
+ preds: Tensor, target: Tensor, num_labels: int, threshold: float = 0.5, ignore_index: Optional[int] = None
645
+ ) -> Tuple[Tensor, Tensor]:
646
+ """Convert all input to label format.
647
+
648
+ - If preds tensor is floating point, applies sigmoid if pred tensor not in [0,1] range
649
+ - If preds tensor is floating point, thresholds afterwards
650
+ - Mask all elements that should be ignored with negative numbers for later filtration
651
+ """
652
+ if preds.is_floating_point():
653
+ if not torch.all((0 <= preds) * (preds <= 1)):
654
+ preds = preds.sigmoid()
655
+ preds = preds > threshold
656
+ preds = preds.reshape(*preds.shape[:2], -1)
657
+ target = target.reshape(*target.shape[:2], -1)
658
+
659
+ if ignore_index is not None:
660
+ idx = target == ignore_index
661
+ target = target.clone()
662
+ target[idx] = -1
663
+
664
+ return preds, target
665
+
666
+
667
+ def _multilabel_stat_scores_update(
668
+ preds: Tensor, target: Tensor, multidim_average: Literal["global", "samplewise"] = "global"
669
+ ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
670
+ """Computes the statistics."""
671
+ sum_dim = [0, -1] if multidim_average == "global" else [-1]
672
+ tp = ((target == preds) & (target == 1)).sum(sum_dim).squeeze()
673
+ fn = ((target != preds) & (target == 1)).sum(sum_dim).squeeze()
674
+ fp = ((target != preds) & (target == 0)).sum(sum_dim).squeeze()
675
+ tn = ((target == preds) & (target == 0)).sum(sum_dim).squeeze()
676
+ return tp, fp, tn, fn
677
+
678
+
679
+ def _multilabel_stat_scores_compute(
680
+ tp: Tensor,
681
+ fp: Tensor,
682
+ tn: Tensor,
683
+ fn: Tensor,
684
+ average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
685
+ multidim_average: Literal["global", "samplewise"] = "global",
686
+ ) -> Tensor:
687
+ """Stack statistics and compute support also.
688
+
689
+ Applies average strategy afterwards.
690
+ """
691
+ res = torch.stack([tp, fp, tn, fn, tp + fn], dim=-1)
692
+ sum_dim = 0 if multidim_average == "global" else 1
693
+ if average == "micro":
694
+ return res.sum(sum_dim)
695
+ elif average == "macro":
696
+ return res.float().mean(sum_dim)
697
+ elif average == "weighted":
698
+ w = tp + fn
699
+ return (res * (w / w.sum()).reshape(*w.shape, 1)).sum(sum_dim)
700
+ elif average is None or average == "none":
701
+ return res
702
+
703
+
704
def multilabel_stat_scores(
    preds: Tensor,
    target: Tensor,
    num_labels: int,
    threshold: float = 0.5,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
    multidim_average: Literal["global", "samplewise"] = "global",
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes the number of true positives, false positives, true negatives, false negatives and the support for
    multilabel tasks. Related to `Type I and Type II errors`_.

    ``preds`` is an int or float tensor of shape ``(N, C, ...)``; float predictions with values outside the
    [0,1] range are treated as logits (sigmoid is applied per element) and are then binarized with
    ``threshold``. ``target`` is an int tensor of shape ``(N, C, ...)``.

    Args:
        preds: Tensor with predictions
        target: Tensor with true labels
        num_labels: Integer specifing the number of labels
        threshold: Threshold for transforming probability to binary (0,1) predictions
        average: Reduction over labels: ``"micro"`` (sum all statistics), ``"macro"`` (unweighted mean),
            ``"weighted"`` (support-weighted mean) or ``"none"``/``None`` (no reduction).
        multidim_average: How extra dimensions ``...`` are handled: ``"global"`` flattens them into the batch
            dimension, ``"samplewise"`` computes the statistics per sample over them.
        ignore_index: Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Returns:
        A tensor whose last dimension holds ``[tp, fp, tn, fn, sup]`` (``sup`` stands for support and equals
        ``tp + fn``). Depending on ``average`` and ``multidim_average`` the shape is ``(5,)``, ``(C, 5)``,
        ``(N, 5)`` or ``(N, C, 5)``.

    Example:
        >>> from torchmetrics.functional.classification import multilabel_stat_scores
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
        >>> multilabel_stat_scores(preds, target, num_labels=3, average='micro')
        tensor([2, 1, 2, 1, 3])
        >>> multilabel_stat_scores(preds, target, num_labels=3, average=None)
        tensor([[1, 0, 1, 0, 1],
                [0, 0, 1, 1, 1],
                [1, 1, 0, 0, 1]])
    """
    if validate_args:
        _multilabel_stat_scores_arg_validation(num_labels, threshold, average, multidim_average, ignore_index)
        _multilabel_stat_scores_tensor_validation(preds, target, num_labels, multidim_average, ignore_index)
    # canonicalize inputs to binarized label tensors, then count and finally reduce
    preds, target = _multilabel_stat_scores_format(preds, target, num_labels, threshold, ignore_index)
    stats = _multilabel_stat_scores_update(preds, target, multidim_average)
    return _multilabel_stat_scores_compute(*stats, average, multidim_average)
812
+
813
+
814
+ def _del_column(data: Tensor, idx: int) -> Tensor:
815
+ """Delete the column at index."""
816
+ return torch.cat([data[:, :idx], data[:, (idx + 1) :]], 1)
817
+
818
+
819
def _drop_negative_ignored_indices(
    preds: Tensor, target: Tensor, ignore_index: int, mode: DataType
) -> Tuple[Tensor, Tensor]:
    """Drop all positions whose target equals the (negative) ``ignore_index``.

    Args:
        preds: Predicted tensor
        target: Ground truth tensor
        ignore_index: Specify a class (label) to ignore. If given, this class index does not contribute
            to the returned score, regardless of reduction method. If an index is ignored, and
            ``reduce='macro'``, the class statistics for the ignored class will all be returned
            as ``-1``.
        mode: Mode of the input tensors

    Return:
        Tensors of preds and target without negative ignore target values.
    """
    if mode == mode.MULTIDIM_MULTICLASS and preds.dtype == torch.float:
        # Multi-dimensional multi-class with logits: move the class dimension last and
        # flatten the remaining dimensions so rows can be filtered per target element
        num_classes = preds.shape[1]
        preds = preds.transpose(1, preds.ndim - 1)

        # flatten: [N, ..., C] -> [N', C]
        preds = preds.reshape(-1, num_classes)
        target = target.reshape(-1)

    if mode in (mode.MULTICLASS, mode.MULTIDIM_MULTICLASS):
        keep = target != ignore_index
        preds = preds[keep]
        target = target[keep]

    return preds, target
852
+
853
+
854
+ def _stat_scores(
855
+ preds: Tensor,
856
+ target: Tensor,
857
+ reduce: Optional[str] = "micro",
858
+ ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
859
+ """Calculate the number of tp, fp, tn, fn.
860
+
861
+ Args:
862
+ preds: An ``(N, C)`` or ``(N, C, X)`` tensor of predictions (0 or 1)
863
+ target: An ``(N, C)`` or ``(N, C, X)`` tensor of true labels (0 or 1)
864
+ reduce: One of ``'micro'``, ``'macro'``, ``'samples'``
865
+
866
+ Return:
867
+ Returns a list of 4 tensors; tp, fp, tn, fn.
868
+ The shape of the returned tensors depends on the shape of the inputs
869
+ and the ``reduce`` parameter:
870
+
871
+ If inputs are of the shape ``(N, C)``, then:
872
+
873
+ - If ``reduce='micro'``, the returned tensors are 1 element tensors
874
+ - If ``reduce='macro'``, the returned tensors are ``(C,)`` tensors
875
+ - If ``reduce='samples'``, the returned tensors are ``(N,)`` tensors
876
+
877
+ If inputs are of the shape ``(N, C, X)``, then:
878
+
879
+ - If ``reduce='micro'``, the returned tensors are ``(N,)`` tensors
880
+ - If ``reduce='macro'``, the returned tensors are ``(N,C)`` tensors
881
+ - If ``reduce='samples'``, the returned tensors are ``(N,X)`` tensors
882
+ """
883
+ dim: Union[int, List[int]] = 1 # for "samples"
884
+ if reduce == "micro":
885
+ dim = [0, 1] if preds.ndim == 2 else [1, 2]
886
+ elif reduce == "macro":
887
+ dim = 0 if preds.ndim == 2 else 2
888
+
889
+ true_pred, false_pred = target == preds, target != preds
890
+ pos_pred, neg_pred = preds == 1, preds == 0
891
+
892
+ tp = (true_pred * pos_pred).sum(dim=dim)
893
+ fp = (false_pred * pos_pred).sum(dim=dim)
894
+
895
+ tn = (true_pred * neg_pred).sum(dim=dim)
896
+ fn = (false_pred * neg_pred).sum(dim=dim)
897
+
898
+ return tp.long(), fp.long(), tn.long(), fn.long()
899
+
900
+
901
def _stat_scores_update(
    preds: Tensor,
    target: Tensor,
    reduce: Optional[str] = "micro",
    mdmc_reduce: Optional[str] = None,
    num_classes: Optional[int] = None,
    top_k: Optional[int] = 1,
    threshold: float = 0.5,
    multiclass: Optional[bool] = None,
    ignore_index: Optional[int] = None,
    mode: Optional[DataType] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
    """Updates and returns the number of true positives, false positives, true negatives, false negatives. Raises
    ValueError if:

    - The `ignore_index` is not valid
    - When `ignore_index` is used with binary data
    - When inputs are multi-dimensional multi-class, and the ``mdmc_reduce`` parameter is not set

    Args:
        preds: Predicted tensor
        target: Ground truth tensor
        reduce: Defines the reduction that is applied
        mdmc_reduce: Defines how the multi-dimensional multi-class inputs are handled
        num_classes: Number of classes. Necessary for (multi-dimensional) multi-class or multi-label data.
        top_k: Number of the highest probability or logit score predictions considered finding the correct label,
            relevant only for (multi-dimensional) multi-class inputs
        threshold: Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case
            of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities
        multiclass: Used only in certain special cases, where you want to treat inputs as a different type
            than what they appear to be
        ignore_index: Specify a class (label) to ignore. If given, this class index does not contribute
            to the returned score, regardless of reduction method. If an index is ignored, and
            ``reduce='macro'``, the class statistics for the ignored class will all be returned
            as ``-1``.
        mode: Mode of the input tensors
    """

    # tracks whether rows with a negative `ignore_index` were already filtered out, so the
    # column-deletion / masking logic below is skipped for them
    _negative_index_dropped = False

    # a negative ignore_index cannot address a column after one-hot conversion, so drop those rows up front
    if ignore_index is not None and ignore_index < 0 and mode is not None:
        preds, target = _drop_negative_ignored_indices(preds, target, ignore_index, mode)
        _negative_index_dropped = True

    # canonicalize inputs into one-hot style (N, C) or (N, C, X) label tensors
    preds, target, _ = _input_format_classification(
        preds,
        target,
        threshold=threshold,
        num_classes=num_classes,
        multiclass=multiclass,
        top_k=top_k,
        ignore_index=ignore_index,
    )

    if ignore_index is not None and ignore_index >= preds.shape[1]:
        raise ValueError(f"The `ignore_index` {ignore_index} is not valid for inputs with {preds.shape[1]} classes")

    if ignore_index is not None and preds.shape[1] == 1:
        raise ValueError("You can not use `ignore_index` with binary data.")

    if preds.ndim == 3:
        if not mdmc_reduce:
            raise ValueError(
                "When your inputs are multi-dimensional multi-class, you have to set the `mdmc_reduce` parameter"
            )
        if mdmc_reduce == "global":
            # flatten the extra dimension into the batch dimension: (N, C, X) -> (N*X, C)
            preds = torch.transpose(preds, 1, 2).reshape(-1, preds.shape[1])
            target = torch.transpose(target, 1, 2).reshape(-1, target.shape[1])

    # Delete what is in ignore_index, if applicable (and classes don't matter):
    if ignore_index is not None and reduce != "macro" and not _negative_index_dropped:
        preds = _del_column(preds, ignore_index)
        target = _del_column(target, ignore_index)

    tp, fp, tn, fn = _stat_scores(preds, target, reduce=reduce)

    # Take care of ignore_index: for macro reduction the ignored class is kept but all its
    # statistics are marked with -1 so downstream reductions can filter it out
    if ignore_index is not None and reduce == "macro" and not _negative_index_dropped:
        tp[..., ignore_index] = -1
        fp[..., ignore_index] = -1
        tn[..., ignore_index] = -1
        fn[..., ignore_index] = -1

    return tp, fp, tn, fn
985
+
986
+
987
+ def _stat_scores_compute(tp: Tensor, fp: Tensor, tn: Tensor, fn: Tensor) -> Tensor:
988
+ """Computes the number of true positives, false positives, true negatives, false negatives. Concatenates the
989
+ input tensors along with the support into one output.
990
+
991
+ Args:
992
+ tp: True positives
993
+ fp: False positives
994
+ tn: True negatives
995
+ fn: False negatives
996
+ """
997
+ stats = [
998
+ tp.unsqueeze(-1),
999
+ fp.unsqueeze(-1),
1000
+ tn.unsqueeze(-1),
1001
+ fn.unsqueeze(-1),
1002
+ tp.unsqueeze(-1) + fn.unsqueeze(-1), # support
1003
+ ]
1004
+ outputs: Tensor = torch.cat(stats, -1)
1005
+ outputs = torch.where(outputs < 0, tensor(-1, device=outputs.device), outputs)
1006
+
1007
+ return outputs
1008
+
1009
+
1010
def _reduce_stat_scores(
    numerator: Tensor,
    denominator: Tensor,
    weights: Optional[Tensor],
    average: Optional[str],
    mdmc_average: Optional[str],
    zero_division: int = 0,
) -> Tensor:
    """Reduces scores of type ``numerator/denominator`` or.

    ``weights * (numerator/denominator)``, if ``average='weighted'``.

    Args:
        numerator: A tensor with numerator numbers.
        denominator: A tensor with denominator numbers. If a denominator is
            negative, the class will be ignored (if averaging), or its score
            will be returned as ``nan`` (if ``average=None``).
            If the denominator is zero, then ``zero_division`` score will be
            used for those elements.
        weights: A tensor of weights to be used if ``average='weighted'``.
        average: The method to average the scores
        mdmc_average: The method to average the scores if inputs were multi-dimensional multi-class (MDMC)
        zero_division: The value to use for the score if denominator equals zero.
    """
    numerator, denominator = numerator.float(), denominator.float()
    # zero denominator -> score becomes `zero_division`; a negative denominator marks an ignored class
    zero_div_mask = denominator == 0
    ignore_mask = denominator < 0

    if weights is None:
        # unweighted case: every class contributes equally
        weights = torch.ones_like(denominator)
    else:
        weights = weights.float()

    # neutralize zero/ignored denominators so the division below is always well-defined:
    # the numerator is forced to `zero_division` where the denominator was 0, the denominator
    # itself is set to 1, and ignored classes get zero weight
    numerator = torch.where(
        zero_div_mask, tensor(zero_division, dtype=numerator.dtype, device=numerator.device), numerator
    )
    denominator = torch.where(
        zero_div_mask | ignore_mask, tensor(1.0, dtype=denominator.dtype, device=denominator.device), denominator
    )
    weights = torch.where(ignore_mask, tensor(0.0, dtype=weights.dtype, device=weights.device), weights)

    if average not in (AverageMethod.MICRO, AverageMethod.NONE, None):
        # normalize weights so the final sum over classes is a (weighted) average
        weights = weights / weights.sum(dim=-1, keepdim=True)

    scores = weights * (numerator / denominator)

    # This is in case where sum(weights) = 0, which happens if we ignore the only present class with average='weighted'
    scores = torch.where(torch.isnan(scores), tensor(zero_division, dtype=scores.dtype, device=scores.device), scores)

    if mdmc_average == MDMCAverageMethod.SAMPLEWISE:
        # average over the sample dimension first; a class counts as ignored if it was ignored in any sample
        scores = scores.mean(dim=0)
        ignore_mask = ignore_mask.sum(dim=0).bool()

    if average in (AverageMethod.NONE, None):
        # no reduction: report ignored classes as NaN
        scores = torch.where(ignore_mask, tensor(float("nan"), device=scores.device), scores)
    else:
        scores = scores.sum()

    return scores
1069
+
1070
+
1071
def stat_scores(
    preds: Tensor,
    target: Tensor,
    task: Literal["binary", "multiclass", "multilabel"],
    threshold: float = 0.5,
    num_classes: Optional[int] = None,
    num_labels: Optional[int] = None,
    average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
    multidim_average: Optional[Literal["global", "samplewise"]] = "global",
    top_k: Optional[int] = 1,
    ignore_index: Optional[int] = None,
    validate_args: bool = True,
) -> Tensor:
    r"""Computes the number of true positives, false positives, true negatives, false negatives and the support.

    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
    :func:`binary_stat_scores`, :func:`multiclass_stat_scores` and :func:`multilabel_stat_scores` for the specific
    details of each argument influence and examples.

    Legacy Example:
        >>> preds = torch.tensor([1, 0, 2, 1])
        >>> target = torch.tensor([1, 1, 2, 0])
        >>> stat_scores(preds, target, task='multiclass', num_classes=3, average='micro')
        tensor([2, 2, 6, 2, 4])
        >>> stat_scores(preds, target, task='multiclass', num_classes=3, average=None)
        tensor([[0, 1, 2, 1, 1],
                [1, 1, 1, 1, 2],
                [1, 0, 3, 0, 1]])
    """
    # `multidim_average` is Optional in the signature for API symmetry with other wrappers,
    # but every task-specific function requires a concrete value
    assert multidim_average is not None
    if task == "binary":
        return binary_stat_scores(preds, target, threshold, multidim_average, ignore_index, validate_args)
    if task == "multiclass":
        # the multiclass path additionally requires `num_classes` and `top_k`
        assert isinstance(num_classes, int)
        assert isinstance(top_k, int)
        return multiclass_stat_scores(
            preds, target, num_classes, average, top_k, multidim_average, ignore_index, validate_args
        )
    if task == "multilabel":
        # the multilabel path additionally requires `num_labels`
        assert isinstance(num_labels, int)
        return multilabel_stat_scores(
            preds, target, num_labels, threshold, average, multidim_average, ignore_index, validate_args
        )
    raise ValueError(
        f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
    )
wemm/lib/python3.10/site-packages/torchmetrics/functional/nominal/__init__.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from torchmetrics.functional.nominal.cramers import cramers_v, cramers_v_matrix # noqa: F401
15
+ from torchmetrics.functional.nominal.pearson import ( # noqa: F401
16
+ pearsons_contingency_coefficient,
17
+ pearsons_contingency_coefficient_matrix,
18
+ )
19
+ from torchmetrics.functional.nominal.theils_u import theils_u, theils_u_matrix # noqa: F401
20
+ from torchmetrics.functional.nominal.tschuprows import tschuprows_t, tschuprows_t_matrix # noqa: F401
wemm/lib/python3.10/site-packages/torchmetrics/functional/regression/concordance.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The PyTorch Lightning team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import torch
15
+ from torch import Tensor
16
+
17
+ from torchmetrics.functional.regression.pearson import _pearson_corrcoef_compute, _pearson_corrcoef_update
18
+
19
+
20
def _concordance_corrcoef_compute(
    mean_x: Tensor,
    mean_y: Tensor,
    var_x: Tensor,
    var_y: Tensor,
    corr_xy: Tensor,
    nb: Tensor,
) -> Tensor:
    """Turn the accumulated running statistics into the concordance correlation coefficient.

    The Pearson correlation is first recovered from the accumulated second-order statistics,
    then plugged into the CCC formula
    ``2 * rho * sigma_x * sigma_y / (sigma_x^2 + sigma_y^2 + (mu_x - mu_y)^2)``.
    """
    rho = _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb)
    # Numerator and denominator of Lin's concordance formula, kept as named
    # intermediates for readability.
    numerator = 2.0 * rho * var_x.sqrt() * var_y.sqrt()
    denominator = var_x + var_y + (mean_x - mean_y) ** 2
    return numerator / denominator
31
+
32
+
33
def concordance_corrcoef(preds: Tensor, target: Tensor) -> Tensor:
    r"""Computes concordance correlation coefficient that measures the agreement between two variables. It is
    defined as.

    .. math::
        \rho_c = \frac{2 \rho \sigma_x \sigma_y}{\sigma_x^2 + \sigma_y^2 + (\mu_x - \mu_y)^2}

    where :math:`\mu_x, \mu_y` is the means for the two variables, :math:`\sigma_x^2, \sigma_y^2` are the corresponding
    variances and \rho is the pearson correlation coefficient between the two variables.

    Args:
        preds: estimated scores
        target: ground truth scores

    Example (single output regression):
        >>> from torchmetrics.functional import concordance_corrcoef
        >>> target = torch.tensor([3, -0.5, 2, 7])
        >>> preds = torch.tensor([2.5, 0.0, 2, 8])
        >>> concordance_corrcoef(preds, target)
        tensor([0.9777])

    Example (multi output regression):
        >>> from torchmetrics.functional import concordance_corrcoef
        >>> target = torch.tensor([[3, -0.5], [2, 7]])
        >>> preds = torch.tensor([[2.5, 0.0], [2, 8]])
        >>> concordance_corrcoef(preds, target)
        tensor([0.7273, 0.9887])
    """
    # One accumulator slot per output dimension.
    # NOTE(review): this uses shape[1] for ndim == 2 but the `num_outputs` kwarg below
    # uses shape[-1] for ndim > 1 — equivalent for 1D/2D inputs; confirm for higher ranks.
    n_outputs = preds.shape[1] if preds.ndim == 2 else 1
    template = torch.zeros(n_outputs, dtype=preds.dtype, device=preds.device)
    # Fresh zero tensors for each running statistic fed into the Pearson update.
    mean_x, mean_y, var_x = (template.clone() for _ in range(3))
    var_y, corr_xy, nb = (template.clone() for _ in range(3))
    mean_x, mean_y, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(
        preds,
        target,
        mean_x,
        mean_y,
        var_x,
        var_y,
        corr_xy,
        nb,
        num_outputs=1 if preds.ndim == 1 else preds.shape[-1],
    )
    return _concordance_corrcoef_compute(mean_x, mean_y, var_x, var_y, corr_xy, nb)