| """ SQuAD metric. """ |
|
|
| import datasets |
|
|
| import evaluate |
|
|
| from .compute_score import compute_score |
|
|
|
|
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
    title={SQuAD: 100,000+ Questions for Machine Comprehension of Text},
    author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
    booktitle={EMNLP},
    year={2016}
}
"""


_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).

Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage.
"""


_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
    predictions: List of question-answer dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': the text of the answer
    references: List of question-answer dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the SQuAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly matches the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
Examples:

    >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
    >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
    >>> squad_metric = evaluate.load("squad")
    >>> results = squad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0}
"""


@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(evaluate.Metric):
    def _info(self):
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        # Map each question id to its predicted answer text.
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # Rebuild the nested article -> paragraphs -> qas layout that the
        # official scoring script expects; answer_start offsets are not needed.
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = compute_score(dataset=dataset, predictions=pred_dict)
        return score
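

# A minimal usage sketch, mirroring the doctest in _KWARGS_DESCRIPTION. It
# assumes this module is published as the "squad" metric so that
# evaluate.load("squad") resolves to it; the id and answer texts are made up.
if __name__ == "__main__":
    squad_metric = evaluate.load("squad")
    predictions = [{"id": "qa-1", "prediction_text": "1976"}]
    # Several gold answers may be listed; the scorer keeps the best match.
    references = [{"id": "qa-1", "answers": {"text": ["1976", "the year 1976"], "answer_start": [97, 97]}}]
    print(squad_metric.compute(predictions=predictions, references=references))
    # -> {'exact_match': 100.0, 'f1': 100.0}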