heliumind committed
Commit b434b3b · verified · 1 Parent(s): 4e9d276

Create seqeval.py

Files changed (1)
  1. seqeval.py +183 -0
seqeval.py ADDED
@@ -0,0 +1,183 @@
# Copyright 2020 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" seqeval metric. """

import importlib
from typing import List, Optional, Union

import datasets
from seqeval.metrics import accuracy_score, classification_report

import evaluate


_CITATION = """\
@inproceedings{ramshaw-marcus-1995-text,
    title = "Text Chunking using Transformation-Based Learning",
    author = "Ramshaw, Lance and
      Marcus, Mitch",
    booktitle = "Third Workshop on Very Large Corpora",
    year = "1995",
    url = "https://www.aclweb.org/anthology/W95-0107",
}
@misc{seqeval,
    title = {{seqeval}: A Python framework for sequence labeling evaluation},
    url = {https://github.com/chakki-works/seqeval},
    note = {Software available from https://github.com/chakki-works/seqeval},
    author = {Hiroki Nakayama},
    year = {2018},
}
"""

_DESCRIPTION = """\
seqeval is a Python framework for sequence labeling evaluation.
seqeval can evaluate the performance of chunking tasks such as named-entity recognition, part-of-speech tagging, semantic role labeling and so on.

seqeval is well-tested against the Perl script conlleval, which can be used for
measuring the performance of a system that has processed the CoNLL-2000 shared task data.

seqeval supports the following tagging schemes:
IOB1
IOB2
IOE1
IOE2
IOBES

See the README.md file at https://github.com/chakki-works/seqeval for more information.
"""
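
# Editor's note (illustrative, not part of the original file): the same
# two-token PER entity encoded under two of the schemes listed above:
#   IOB2:  ["B-PER", "I-PER"]
#   IOBES: ["B-PER", "E-PER"]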

_KWARGS_DESCRIPTION = """
Produces labelling scores along with their sufficient statistics
from a set of predictions against one or more references.

Args:
    predictions: List of List of predicted labels (Estimated targets as returned by a tagger)
    references: List of List of reference labels (Ground truth (correct) target values)
    suffix: True if the IOB tag comes after the entity type rather than before it, False otherwise. default: False
    scheme: Specify target tagging scheme. Should be one of ["IOB1", "IOB2", "IOE1", "IOE2", "IOBES", "BILOU"].
        default: None
    mode: Whether to count correct entity labels with incorrect I/B tags as true positives or not.
        If you want to count only exact matches, pass mode="strict". default: None.
    sample_weight: Array-like of shape (n_samples,), weights for individual samples. default: None
    zero_division: Which value to substitute as a metric value when encountering zero division. Should be one of 0, 1,
        "warn". "warn" acts as 0, but a warning is also raised. default: "warn"

Returns:
    'scores': dict. Summary of the scores per entity type, plus micro-, macro- and weighted-averaged scores:
        Per type (one entry per entity type, e.g. 'PER'):
            'precision': precision,
            'recall': recall,
            'f1': F1 score, also known as balanced F-score or F-measure,
            'number': number of reference entities of this type (support),
        Micro-averaged:
            'micro_precision': precision,
            'micro_recall': recall,
            'micro_f1': F1 score,
        Weighted-averaged:
            'weighted_precision': precision,
            'weighted_recall': recall,
            'weighted_f1': F1 score,
        Macro-averaged:
            'macro_precision': precision,
            'macro_recall': recall,
            'macro_f1': F1 score,
        Overall:
            'overall_accuracy': token-level accuracy,
Examples:

    >>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
    >>> references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
    >>> seqeval = evaluate.load("seqeval")
    >>> results = seqeval.compute(predictions=predictions, references=references)
    >>> print(list(results.keys()))
    ['MISC', 'PER', 'micro_precision', 'micro_recall', 'micro_f1', 'weighted_precision', 'weighted_recall', 'weighted_f1', 'macro_precision', 'macro_recall', 'macro_f1', 'overall_accuracy']
    >>> print(results["micro_f1"])
    0.5
    >>> print(results["PER"]["f1"])
    1.0
"""


@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Seqeval(evaluate.Metric):
    def _info(self):
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/chakki-works/seqeval",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="label"), id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="label"), id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/chakki-works/seqeval"],
            reference_urls=["https://github.com/chakki-works/seqeval"],
        )
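
    # Editor's note (illustrative, not part of the original file): the
    # Features above mean each prediction and each reference is a list of
    # string labels, one per token, e.g. [["O", "B-PER", "I-PER"], ["B-LOC"]].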

    def _compute(
        self,
        predictions,
        references,
        suffix: bool = False,
        scheme: Optional[str] = None,
        mode: Optional[str] = None,
        sample_weight: Optional[List[int]] = None,
        zero_division: Union[str, int] = "warn",
    ):
        if scheme is not None:
            try:
                scheme_module = importlib.import_module("seqeval.scheme")
                scheme = getattr(scheme_module, scheme)
            except AttributeError:
                raise ValueError(f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {scheme}")
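
        # Editor's note (illustrative, not part of the original file): the
        # importlib lookup above resolves the scheme name to a class in
        # seqeval.scheme, i.e. scheme="IOB2" becomes seqeval.scheme.IOB2,
        # equivalent to writing `from seqeval.scheme import IOB2`.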
        report = classification_report(
            y_true=references,
            y_pred=predictions,
            suffix=suffix,
            output_dict=True,
            scheme=scheme,
            mode=mode,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        macro_score = report.pop("macro avg")
        weighted_score = report.pop("weighted avg")
        micro_score = report.pop("micro avg")

        scores = {
            type_name: {
                "precision": score["precision"],
                "recall": score["recall"],
                "f1": score["f1-score"],
                "number": score["support"],
            }
            for type_name, score in report.items()
        }
        scores["micro_precision"] = micro_score["precision"]
        scores["micro_recall"] = micro_score["recall"]
        scores["micro_f1"] = micro_score["f1-score"]

        scores["weighted_precision"] = weighted_score["precision"]
        scores["weighted_recall"] = weighted_score["recall"]
        scores["weighted_f1"] = weighted_score["f1-score"]

        scores["macro_precision"] = macro_score["precision"]
        scores["macro_recall"] = macro_score["recall"]
        scores["macro_f1"] = macro_score["f1-score"]

        scores["overall_accuracy"] = accuracy_score(y_true=references, y_pred=predictions)

        return scores
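
Editor's note: for reference, a minimal usage sketch against the module above. It assumes recent evaluate and seqeval installs, and that evaluate.load("seqeval") resolves to this file rather than the canonical Hub metric (whose result keys differ); adjust the load path to wherever this seqeval.py actually lives.

import evaluate

# Assumption: "seqeval" here resolves to the module above (e.g. a local
# copy or this repository), not the canonical evaluate-metric/seqeval.
seqeval = evaluate.load("seqeval")

predictions = [["O", "B-PER", "I-PER", "O"], ["B-MISC", "O"]]
references = [["O", "B-PER", "I-PER", "O"], ["B-MISC", "O"]]

# mode="strict" with an explicit scheme counts only exact span/type
# matches; the string "IOB2" is resolved to seqeval.scheme.IOB2 in _compute.
results = seqeval.compute(
    predictions=predictions,
    references=references,
    scheme="IOB2",
    mode="strict",
)

print(results["PER"]["f1"])         # per-type F1
print(results["micro_f1"])          # micro-averaged F1 over all entity types
print(results["overall_accuracy"])  # token-level accuracy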