| | """TODO: Add a description here.""" |
| |
|
| | import evaluate |
| | import datasets |
| | import re |
| | import string |
| | from tqdm import tqdm |
| | from collections import Counter |
| |
|

_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
"""

_DESCRIPTION = """\
Computes character-level F1, precision, and recall between predicted and reference
answer strings for Korean question answering. Each string is normalized first:
quote and bracket symbols are removed, text is lowercased, punctuation is stripped,
and runs of whitespace are collapsed.
"""

_KWARGS_DESCRIPTION = """
Computes character-level F1, precision, and recall for Korean QA predictions.
Args:
    predictions: list of predicted answer strings.
    references: list of reference answer strings, one per prediction.
Returns:
    f1: mean character-level F1 over all examples, as a percentage.
    precision: mean character-level precision over all examples, as a percentage.
    recall: mean character-level recall over all examples, as a percentage.
Examples:
    >>> ecqa_metric = evaluate.load("ecqa")
    >>> results = ecqa_metric.compute(predictions=["서울특별시"], references=["서울특별시"])
    >>> print(results)
    {'f1': 100.0, 'precision': 100.0, 'recall': 100.0}
"""


def remove_(text: str) -> str:
    """Remove unnecessary symbols (quotes and brackets)."""
    text = re.sub("'", " ", text)
    text = re.sub('"', " ", text)
    text = re.sub("《", " ", text)
    text = re.sub("》", " ", text)
    text = re.sub("<", " ", text)
    text = re.sub(">", " ", text)
    text = re.sub("〈", " ", text)
    text = re.sub("〉", " ", text)
    text = re.sub(r"\(", " ", text)
    text = re.sub(r"\)", " ", text)
    text = re.sub("‘", " ", text)
    text = re.sub("’", " ", text)
    return text


def white_space_fix(text: str) -> str:
    """Collapse consecutive whitespace into a single space."""
    return " ".join(text.split())


def remove_punc(text: str) -> str:
    """Remove punctuation characters."""
    exclude = set(string.punctuation)
    return "".join(ch for ch in text if ch not in exclude)


def lower(text: str) -> str:
    """Convert to lowercase."""
    return text.lower()
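
# A quick, illustrative trace of the helpers above (outputs computed from the
# implementations as written; shown for orientation only):
#   remove_("《서울》(대한민국)")        -> " 서울  대한민국 "
#   white_space_fix(" 서울  대한민국 ")  -> "서울 대한민국"
#   remove_punc("U.S.A.")               -> "USA"
#   lower("Seoul")                      -> "seoul"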


@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ecqa(evaluate.Metric):
    """Character-level F1, precision, and recall for Korean QA answer strings."""

    def _info(self):
        return evaluate.MetricInfo(
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="http://module.homepage",
            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
            reference_urls=["http://path.to.reference.url/new_module"],
        )

    def _download_and_prepare(self, dl_manager):
        """Optional: download external resources useful to compute the scores."""
        pass

    def __normalize(self, text: str) -> str:
        """Apply the full normalization pipeline to a raw answer string."""
        text = remove_(text)
        text = lower(text)
        text = remove_punc(text)
        return white_space_fix(text)

    def __compute_f1(self, prediction: str, reference: str) -> tuple[float, float, float]:
        """Compute character-level F1, precision, and recall for one example."""
        predicted_tokens = self.__normalize(prediction).split()
        referenced_tokens = self.__normalize(reference).split()

        # Flatten the tokens into individual characters.
        predicted_chars = [char for token in predicted_tokens for char in token]
        referenced_chars = [char for token in referenced_tokens for char in token]

        # Multiset intersection: each shared character counts once per occurrence.
        true_positive = Counter(predicted_chars) & Counter(referenced_chars)
        n_true_positive = sum(true_positive.values())
        if n_true_positive == 0:
            return 0.0, 0.0, 0.0

        precision = n_true_positive / len(predicted_chars)
        recall = n_true_positive / len(referenced_chars)
        f1 = (2 * precision * recall) / (precision + recall)

        # Return order matches the unpacking in _compute: (f1, precision, recall).
        return f1, precision, recall
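
    # Worked example of __compute_f1 (values follow from the code above):
    #   prediction "서울"   -> chars [서, 울]
    #   reference  "서울시" -> chars [서, 울, 시]
    #   shared characters = 2
    #   precision = 2/2 = 1.0, recall = 2/3 ≈ 0.667
    #   f1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8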

    def _compute(self, predictions: list[str], references: list[str]):
        """Returns the scores, averaged over all examples, as percentages."""
        assert isinstance(predictions, list)
        assert isinstance(references, list)
        assert len(predictions) == len(references)

        f1_acc = precision_acc = recall_acc = total = 0
        for prediction, reference in tqdm(zip(predictions, references), total=len(predictions)):
            total += 1
            f1_computed, precision_computed, recall_computed = self.__compute_f1(prediction, reference)

            f1_acc += f1_computed
            precision_acc += precision_computed
            recall_acc += recall_computed

        # Average each accumulated score and report it as a percentage.
        f1, precision, recall = [100.0 * acc / total for acc in (f1_acc, precision_acc, recall_acc)]

        return {
            "f1": f1,
            "precision": precision,
            "recall": recall,
        }
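

# Minimal usage sketch. Assumes this module is saved locally as "ecqa.py";
# the load path is an assumption, not a published Hub module name.
if __name__ == "__main__":
    ecqa_metric = evaluate.load("ecqa.py")  # hypothetical local path
    scores = ecqa_metric.compute(
        predictions=["서울특별시", "대한민국"],
        references=["서울특별시", "한국"],
    )
    print(scores)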