# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here."""
import evaluate
import datasets
import numpy as np
# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
"""
_DESCRIPTION = """\
This metric evaluates MCQ (multiple-choice question) generation tasks, scoring
model generations against single-letter gold answers.
"""
_KWARGS_DESCRIPTION = """
Calculates accuracy, BLEU-1, and BERTScore between generations and gold answers in an MCQ context.
Args:
    generations: list of predictions to score. Each prediction
        should be a string generated by a language model.
    golds: list of references, one per prediction. Each
        reference should be a string containing a single letter (e.g. A, B, C...).
Returns:
    accuracy: ratio of correct answers,
    bleu-1: unigram BLEU computed by the `bleu` module of `evaluate`,
    bert_score_precision / bert_score_recall / bert_score_f1: mean BERTScore
        statistics computed by the `bertscore` module of `evaluate`.
Examples:
    Here is an example of how to use the metric:

    >>> metric = evaluate.load("rfr2003/MQC_eval")
    >>> results = metric.compute(generations=["A", "B"], golds=["A", "D"])
    >>> print(results)
    {'bert_score_precision': ..., 'bert_score_recall': ..., 'bert_score_f1': ..., 'accuracy': 0.5, 'bleu-1': 0.5}
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MCQ_eval(evaluate.Metric):
"""TODO: Short description of my evaluation module."""
def _info(self):
        # Specifies the evaluate.EvaluationModuleInfo object
return evaluate.MetricInfo(
# This is the description that will appear on the modules page.
module_type="metric",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
# This defines the format of each prediction and reference
features=datasets.Features({
'generations': datasets.Value('string'),
'golds': datasets.Value('string'),
}),
# Homepage of the module for documentation
#homepage="http://module.homepage",
# Additional links to the codebase or references
#codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
#reference_urls=["http://path.to.reference.url/new_module"]
)
    def _download_and_prepare(self, dl_manager):
        """Download external resources needed to compute the scores."""
        # Load the underlying BLEU and BERTScore modules once, at preparation
        # time, so repeated calls to compute() reuse them.
        self.bleu = evaluate.load('bleu')
        self.bert_score = evaluate.load('bertscore')
    def _compute(self, generations, golds):
        """Returns the scores."""
        assert len(generations) == len(golds), "generations and golds must have the same length"
        correct, total = 0, 0
        predictions, references = [], []
        for gen, gold in zip(generations, golds):
            # Normalize both sides to upper case; if the model generated more
            # than one character (e.g. "B) some answer"), keep only its first character.
            gen = gen.strip().upper()
            gold = gold.upper()
            if len(gen) > 1:
                gen = gen[0]
            if gen == gold:
                correct += 1
            total += 1
            predictions.append(gen)
            references.append(gold)
        # BERTScore returns per-example lists for precision/recall/f1 (plus a
        # 'hashcode' entry, which is filtered out); report the mean of each list.
        bert_scores = self.bert_score.compute(predictions=predictions, references=references, lang="en")
        metrics = {
            f"bert_score_{k}": np.mean(v).item()
            for k, v in bert_scores.items()
            if k in ['recall', 'precision', 'f1']
        }
        metrics.update({
            'accuracy': correct / total,
            'bleu-1': self.bleu.compute(predictions=predictions, references=references, max_order=1)['bleu']
        })
        return metrics
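

# A minimal, self-contained smoke test (a sketch, not part of the metric itself).
# It assumes the `evaluate` and `bert-score` packages are installed and that the
# default BERTScore model can be downloaded; exact BERTScore values will vary.
if __name__ == "__main__":
    metric = MCQ_eval()
    # download_and_prepare() triggers _download_and_prepare(), which loads the
    # BLEU and BERTScore modules (evaluate.load() normally does this for you).
    metric.download_and_prepare()
    results = metric.compute(
        generations=["A", "b) some extra text", "C"],
        golds=["A", "B", "D"],
    )
    print(results)  # accuracy should be 2/3: "b) some extra text" normalizes to "B"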