# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here."""
import ast
import re

import datasets
import evaluate
# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
"""
_DESCRIPTION = """\
This metric evaluates predictions for the NY-POI dataset from the GeoBenchmark.
"""
_KWARGS_DESCRIPTION = """
Calculates accuracy@1, accuracy@3, accuracy@5 and accuracy@10 between lists of POI ids generated by an LM and the gold POI ids.
Args:
    generations: list of predictions to score. Each prediction
        should be a string generated by a language model and containing
        a list of POI ids, e.g. "[1, 2, 3]".
    golds: list of references, one per prediction. Each
        reference should be an int representing the gold POI id.
Returns:
    accuracy@1: fraction of examples whose gold POI id is the first id returned by the model.
    accuracy@3: fraction of examples whose gold POI id is among the first 3 ids returned by the model.
    accuracy@5: fraction of examples whose gold POI id is among the first 5 ids returned by the model.
    accuracy@10: fraction of examples whose gold POI id is among the first 10 ids returned by the model.
Examples:
    >>> my_new_module = evaluate.load("rfr2003/ny_poi_evaluate")
    >>> results = my_new_module.compute(generations=["[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]", "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]"], golds=[1, 3], n_chances=10)
    >>> print(results)
    {'accuracy@1': 0.5, 'accuracy@3': 1.0, 'accuracy@5': 1.0, 'accuracy@10': 1.0}
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class NY_POI_evaluate(evaluate.Metric):
"""TODO: Short description of my evaluation module."""
def _info(self):
return evaluate.MetricInfo(
# This is the description that will appear on the modules page.
module_type="metric",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
# This defines the format of each prediction and reference
features=datasets.Features({
'generations': datasets.Value('string'),
'golds': datasets.Value('int64'),
}),
)
def _download_and_prepare(self, dl_manager):
"""Optional: download external resources useful to compute the scores"""
# TODO: Download external resources if needed
pass
    def _compute(self, generations, golds, n_chances=10):
        """Returns the accuracy@k scores averaged over all examples."""
        assert len(generations) == len(golds) > 0, "generations and golds must be non-empty and the same length"
        correct = {1: 0, 3: 0, 5: 0, 10: 0}
        total = 0
        for gen, gold in zip(generations, golds):
            f_gold = int(gold)
            # Parse the generated string as a Python literal; if that fails,
            # fall back to stripping brackets/parentheses/spaces and splitting
            # on commas, e.g. "[1, 2, 3]" -> ['1', '2', '3'].
            try:
                catch = ast.literal_eval(gen)
            except (ValueError, SyntaxError):
                subs = ['[', ']', '(', ')', ' ']
                pattern = r'(' + '|'.join(map(re.escape, subs)) + r')'
                catch = re.sub(pattern, "", gen).split(',')
            # A bare number parses to a scalar; normalize it to a sequence.
            if not isinstance(catch, (list, tuple)):
                catch = [catch]
            # Keep only the entries that can be read as integer POI ids.
            f_ans = []
            for a in catch:
                try:
                    f_ans.append(int(a))
                except (TypeError, ValueError):
                    continue
            # Credit every accuracy@k whose top-k window contains the gold id,
            # counting each example at most once (hence the break).
            for i, ans in enumerate(f_ans[:n_chances]):
                if ans == f_gold:
                    for key in correct:
                        if i < key:
                            correct[key] += 1
                    break
            total += 1
        return {
            'accuracy@1': correct[1] / total,
            'accuracy@3': correct[3] / total,
            'accuracy@5': correct[5] / total,
            'accuracy@10': correct[10] / total,
        }
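

# Minimal local smoke test (an illustrative sketch, not part of the evaluate
# template): it exercises _compute directly instead of going through
# evaluate.load, which would fetch the module from the Hub.
if __name__ == "__main__":
    metric = NY_POI_evaluate()
    scores = metric._compute(
        generations=["[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]", "(7, 3, 9)"],
        golds=[1, 3],
    )
    # Gold 1 is first in the first list; gold 3 is second in the tuple,
    # so it counts toward accuracy@3/@5/@10 but not accuracy@1.
    print(scores)  # {'accuracy@1': 0.5, 'accuracy@3': 1.0, 'accuracy@5': 1.0, 'accuracy@10': 1.0}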