| """Exact match test for model comparison.""" |
|
|
| import datasets |
| import numpy as np |
|
|
| import evaluate |
|
|
|
|
| _DESCRIPTION = """ |
| Returns the rate at which the predictions of one model exactly match those of another model. |
| """ |
|
|
|
|
| _KWARGS_DESCRIPTION = """ |
Args:
    predictions1 (`list` of `int`): Predicted labels for model 1.
    predictions2 (`list` of `int`): Predicted labels for model 2.

Returns:
    exact_match (`float`): The rate at which the two models' predictions match. Possible values are between 0.0 and 1.0, inclusive.

Examples:
    >>> exact_match = evaluate.load("exact_match", module_type="comparison")
    >>> results = exact_match.compute(predictions1=[1, 1, 1], predictions2=[1, 1, 1])
    >>> print(results)
    {'exact_match': 1.0}
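
    When the two models only partially agree, the score is the fraction of
    positions where their predictions match (illustrative values):
    >>> results = exact_match.compute(predictions1=[0, 1, 1, 0], predictions2=[1, 1, 1, 0])
    >>> print(results)
    {'exact_match': 0.75}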
| """ |
|
|
|
|
| _CITATION = """ |
| """ |
|
|
|
|
| @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) |
class ExactMatch(evaluate.Comparison):
    def _info(self):
        return evaluate.ComparisonInfo(
            module_type="comparison",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions1": datasets.Value("int64"),
                    "predictions2": datasets.Value("int64"),
                }
            ),
        )

    def _compute(self, predictions1, predictions2):
        # Compare the two prediction lists element-wise; assumes equal-length inputs.
        score_list = [p1 == p2 for p1, p2 in zip(predictions1, predictions2)]
        return {"exact_match": np.mean(score_list)}