Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- testbed/huggingface__datasets/.dvcignore +3 -0
- testbed/huggingface__datasets/.pre-commit-config.yaml +9 -0
- testbed/huggingface__datasets/.zenodo.json +130 -0
- testbed/huggingface__datasets/ADD_NEW_DATASET.md +8 -0
- testbed/huggingface__datasets/AUTHORS +8 -0
- testbed/huggingface__datasets/CITATION.cff +150 -0
- testbed/huggingface__datasets/CODE_OF_CONDUCT.md +132 -0
- testbed/huggingface__datasets/CONTRIBUTING.md +122 -0
- testbed/huggingface__datasets/LICENSE +202 -0
- testbed/huggingface__datasets/Makefile +20 -0
- testbed/huggingface__datasets/README.md +212 -0
- testbed/huggingface__datasets/SECURITY.md +33 -0
- testbed/huggingface__datasets/additional-tests-requirements.txt +4 -0
- testbed/huggingface__datasets/benchmarks/benchmark_array_xd.py +142 -0
- testbed/huggingface__datasets/benchmarks/benchmark_iterating.py +98 -0
- testbed/huggingface__datasets/benchmarks/benchmark_map_filter.py +71 -0
- testbed/huggingface__datasets/benchmarks/format.py +49 -0
- testbed/huggingface__datasets/benchmarks/results/benchmark_array_xd.json +1 -0
- testbed/huggingface__datasets/benchmarks/results/benchmark_getitem_100B.json +1 -0
- testbed/huggingface__datasets/benchmarks/results/benchmark_iterating.json +1 -0
- testbed/huggingface__datasets/benchmarks/results/benchmark_map_filter.json +1 -0
- testbed/huggingface__datasets/benchmarks/utils.py +64 -0
- testbed/huggingface__datasets/dvc.yaml +40 -0
- testbed/huggingface__datasets/metrics/accuracy/README.md +97 -0
- testbed/huggingface__datasets/metrics/bertscore/README.md +111 -0
- testbed/huggingface__datasets/metrics/bleurt/bleurt.py +122 -0
- testbed/huggingface__datasets/metrics/cer/cer.py +158 -0
- testbed/huggingface__datasets/metrics/cer/test_cer.py +128 -0
- testbed/huggingface__datasets/metrics/chrf/README.md +133 -0
- testbed/huggingface__datasets/metrics/chrf/chrf.py +174 -0
- testbed/huggingface__datasets/metrics/code_eval/README.md +128 -0
- testbed/huggingface__datasets/metrics/code_eval/code_eval.py +212 -0
- testbed/huggingface__datasets/metrics/code_eval/execute.py +234 -0
- testbed/huggingface__datasets/metrics/comet/README.md +128 -0
- testbed/huggingface__datasets/metrics/comet/comet.py +143 -0
- testbed/huggingface__datasets/metrics/competition_math/README.md +103 -0
- testbed/huggingface__datasets/metrics/competition_math/competition_math.py +94 -0
- testbed/huggingface__datasets/metrics/coval/README.md +170 -0
- testbed/huggingface__datasets/metrics/coval/coval.py +319 -0
- testbed/huggingface__datasets/metrics/cuad/README.md +112 -0
- testbed/huggingface__datasets/metrics/cuad/cuad.py +115 -0
- testbed/huggingface__datasets/metrics/cuad/evaluate.py +205 -0
- testbed/huggingface__datasets/metrics/exact_match/README.md +103 -0
- testbed/huggingface__datasets/metrics/exact_match/exact_match.py +135 -0
- testbed/huggingface__datasets/metrics/f1/README.md +120 -0
- testbed/huggingface__datasets/metrics/f1/f1.py +123 -0
- testbed/huggingface__datasets/metrics/frugalscore/README.md +105 -0
- testbed/huggingface__datasets/metrics/frugalscore/frugalscore.py +116 -0
- testbed/huggingface__datasets/metrics/glue/README.md +105 -0
- testbed/huggingface__datasets/metrics/glue/glue.py +155 -0
testbed/huggingface__datasets/.dvcignore
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Add patterns of files dvc should ignore, which could improve
|
| 2 |
+
# the performance. Learn more at
|
| 3 |
+
# https://dvc.org/doc/user-guide/dvcignore
|
testbed/huggingface__datasets/.pre-commit-config.yaml
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
repos:
|
| 2 |
+
- repo: https://github.com/charliermarsh/ruff-pre-commit # https://github.com/charliermarsh/ruff#usage
|
| 3 |
+
rev: 'v0.1.5'
|
| 4 |
+
hooks:
|
| 5 |
+
# Run the linter.
|
| 6 |
+
- id: ruff
|
| 7 |
+
args: [ --fix ]
|
| 8 |
+
# Run the formatter.
|
| 9 |
+
- id: ruff-format
|
testbed/huggingface__datasets/.zenodo.json
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"license": "Apache-2.0",
|
| 3 |
+
"creators": [
|
| 4 |
+
{
|
| 5 |
+
"affiliation": "Hugging Face",
|
| 6 |
+
"name": "Quentin Lhoest"
|
| 7 |
+
},
|
| 8 |
+
{
|
| 9 |
+
"orcid": "0000-0003-1727-1045",
|
| 10 |
+
"affiliation": "Hugging Face",
|
| 11 |
+
"name": "Albert Villanova del Moral"
|
| 12 |
+
},
|
| 13 |
+
{
|
| 14 |
+
"affiliation": "Hugging Face",
|
| 15 |
+
"name": "Patrick von Platen"
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"affiliation": "Hugging Face",
|
| 19 |
+
"name": "Thomas Wolf"
|
| 20 |
+
},
|
| 21 |
+
{
|
| 22 |
+
"affiliation": "Hugging Face",
|
| 23 |
+
"name": "Mario Šaško"
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"affiliation": "Hugging Face",
|
| 27 |
+
"name": "Yacine Jernite"
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"affiliation": "Hugging Face",
|
| 31 |
+
"name": "Abhishek Thakur"
|
| 32 |
+
},
|
| 33 |
+
{
|
| 34 |
+
"affiliation": "Hugging Face",
|
| 35 |
+
"name": "Lewis Tunstall"
|
| 36 |
+
},
|
| 37 |
+
{
|
| 38 |
+
"affiliation": "Hugging Face",
|
| 39 |
+
"name": "Suraj Patil"
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"affiliation": "Hugging Face",
|
| 43 |
+
"name": "Mariama Drame"
|
| 44 |
+
},
|
| 45 |
+
{
|
| 46 |
+
"affiliation": "Hugging Face",
|
| 47 |
+
"name": "Julien Chaumond"
|
| 48 |
+
},
|
| 49 |
+
{
|
| 50 |
+
"affiliation": "Hugging Face",
|
| 51 |
+
"name": "Julien Plu"
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"affiliation": "Hugging Face",
|
| 55 |
+
"name": "Joe Davison"
|
| 56 |
+
},
|
| 57 |
+
{
|
| 58 |
+
"affiliation": "Hugging Face",
|
| 59 |
+
"name": "Simon Brandeis"
|
| 60 |
+
},
|
| 61 |
+
{
|
| 62 |
+
"affiliation": "Hugging Face",
|
| 63 |
+
"name": "Victor Sanh"
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"affiliation": "Hugging Face",
|
| 67 |
+
"name": "Teven Le Scao"
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"affiliation": "Hugging Face",
|
| 71 |
+
"name": "Kevin Canwen Xu"
|
| 72 |
+
},
|
| 73 |
+
{
|
| 74 |
+
"affiliation": "Hugging Face",
|
| 75 |
+
"name": "Nicolas Patry"
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"affiliation": "Hugging Face",
|
| 79 |
+
"name": "Steven Liu"
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"affiliation": "Hugging Face",
|
| 83 |
+
"name": "Angelina McMillan-Major"
|
| 84 |
+
},
|
| 85 |
+
{
|
| 86 |
+
"affiliation": "Hugging Face",
|
| 87 |
+
"name": "Philipp Schmid"
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"affiliation": "Hugging Face",
|
| 91 |
+
"name": "Sylvain Gugger"
|
| 92 |
+
},
|
| 93 |
+
{
|
| 94 |
+
"affiliation": "Hugging Face",
|
| 95 |
+
"name": "Nathan Raw"
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"affiliation": "Hugging Face",
|
| 99 |
+
"name": "Sylvain Lesage"
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"affiliation": "Hugging Face",
|
| 103 |
+
"name": "Anton Lozhkov"
|
| 104 |
+
},
|
| 105 |
+
{
|
| 106 |
+
"affiliation": "Hugging Face",
|
| 107 |
+
"name": "Matthew Carrigan"
|
| 108 |
+
},
|
| 109 |
+
{
|
| 110 |
+
"affiliation": "Hugging Face",
|
| 111 |
+
"name": "Th\u00e9o Matussi\u00e8re"
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"affiliation": "Hugging Face",
|
| 115 |
+
"name": "Leandro von Werra"
|
| 116 |
+
},
|
| 117 |
+
{
|
| 118 |
+
"affiliation": "Hugging Face",
|
| 119 |
+
"name": "Lysandre Debut"
|
| 120 |
+
},
|
| 121 |
+
{
|
| 122 |
+
"affiliation": "Hugging Face",
|
| 123 |
+
"name": "Stas Bekman"
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"affiliation": "Hugging Face",
|
| 127 |
+
"name": "Cl\u00e9ment Delangue"
|
| 128 |
+
}
|
| 129 |
+
]
|
| 130 |
+
}
|
testbed/huggingface__datasets/ADD_NEW_DATASET.md
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# How to add a new dataset
|
| 2 |
+
|
| 3 |
+
Add datasets directly to the 🤗 Hugging Face Hub!
|
| 4 |
+
|
| 5 |
+
You can share your dataset on https://huggingface.co/datasets directly using your account, see the documentation:
|
| 6 |
+
|
| 7 |
+
* [Create a dataset and upload files on the website](https://huggingface.co/docs/datasets/upload_dataset)
|
| 8 |
+
* [Advanced guide using the CLI](https://huggingface.co/docs/datasets/share)
|
testbed/huggingface__datasets/AUTHORS
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This is the list of HuggingFace Datasets authors for copyright purposes.
|
| 2 |
+
#
|
| 3 |
+
# This does not necessarily list everyone who has contributed code, since in
|
| 4 |
+
# some cases, their employer may be the copyright holder. To see the full list
|
| 5 |
+
# of contributors, see the revision history in source control.
|
| 6 |
+
|
| 7 |
+
Google Inc.
|
| 8 |
+
HuggingFace Inc.
|
testbed/huggingface__datasets/CITATION.cff
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
cff-version: 1.2.0
|
| 2 |
+
message: "If you use this software, please cite it as below."
|
| 3 |
+
title: "huggingface/datasets"
|
| 4 |
+
authors:
|
| 5 |
+
- family-names: Lhoest
|
| 6 |
+
given-names: Quentin
|
| 7 |
+
- family-names: Villanova del Moral
|
| 8 |
+
given-names: Albert
|
| 9 |
+
orcid: "https://orcid.org/0000-0003-1727-1045"
|
| 10 |
+
- family-names: von Platen
|
| 11 |
+
given-names: Patrick
|
| 12 |
+
- family-names: Wolf
|
| 13 |
+
given-names: Thomas
|
| 14 |
+
- family-names: Šaško
|
| 15 |
+
given-names: Mario
|
| 16 |
+
- family-names: Jernite
|
| 17 |
+
given-names: Yacine
|
| 18 |
+
- family-names: Thakur
|
| 19 |
+
given-names: Abhishek
|
| 20 |
+
- family-names: Tunstall
|
| 21 |
+
given-names: Lewis
|
| 22 |
+
- family-names: Patil
|
| 23 |
+
given-names: Suraj
|
| 24 |
+
- family-names: Drame
|
| 25 |
+
given-names: Mariama
|
| 26 |
+
- family-names: Chaumond
|
| 27 |
+
given-names: Julien
|
| 28 |
+
- family-names: Plu
|
| 29 |
+
given-names: Julien
|
| 30 |
+
- family-names: Davison
|
| 31 |
+
given-names: Joe
|
| 32 |
+
- family-names: Brandeis
|
| 33 |
+
given-names: Simon
|
| 34 |
+
- family-names: Sanh
|
| 35 |
+
given-names: Victor
|
| 36 |
+
- family-names: Le Scao
|
| 37 |
+
given-names: Teven
|
| 38 |
+
- family-names: Canwen Xu
|
| 39 |
+
given-names: Kevin
|
| 40 |
+
- family-names: Patry
|
| 41 |
+
given-names: Nicolas
|
| 42 |
+
- family-names: Liu
|
| 43 |
+
given-names: Steven
|
| 44 |
+
- family-names: McMillan-Major
|
| 45 |
+
given-names: Angelina
|
| 46 |
+
- family-names: Schmid
|
| 47 |
+
given-names: Philipp
|
| 48 |
+
- family-names: Gugger
|
| 49 |
+
given-names: Sylvain
|
| 50 |
+
- family-names: Raw
|
| 51 |
+
given-names: Nathan
|
| 52 |
+
- family-names: Lesage
|
| 53 |
+
given-names: Sylvain
|
| 54 |
+
- family-names: Lozhkov
|
| 55 |
+
given-names: Anton
|
| 56 |
+
- family-names: Carrigan
|
| 57 |
+
given-names: Matthew
|
| 58 |
+
- family-names: Matussière
|
| 59 |
+
given-names: Théo
|
| 60 |
+
- family-names: von Werra
|
| 61 |
+
given-names: Leandro
|
| 62 |
+
- family-names: Debut
|
| 63 |
+
given-names: Lysandre
|
| 64 |
+
- family-names: Bekman
|
| 65 |
+
given-names: Stas
|
| 66 |
+
- family-names: Delangue
|
| 67 |
+
given-names: Clément
|
| 68 |
+
doi: 10.5281/zenodo.4817768
|
| 69 |
+
repository-code: "https://github.com/huggingface/datasets"
|
| 70 |
+
license: Apache-2.0
|
| 71 |
+
preferred-citation:
|
| 72 |
+
type: conference-paper
|
| 73 |
+
title: "Datasets: A Community Library for Natural Language Processing"
|
| 74 |
+
authors:
|
| 75 |
+
- family-names: Lhoest
|
| 76 |
+
given-names: Quentin
|
| 77 |
+
- family-names: Villanova del Moral
|
| 78 |
+
given-names: Albert
|
| 79 |
+
orcid: "https://orcid.org/0000-0003-1727-1045"
|
| 80 |
+
- family-names: von Platen
|
| 81 |
+
given-names: Patrick
|
| 82 |
+
- family-names: Wolf
|
| 83 |
+
given-names: Thomas
|
| 84 |
+
- family-names: Šaško
|
| 85 |
+
given-names: Mario
|
| 86 |
+
- family-names: Jernite
|
| 87 |
+
given-names: Yacine
|
| 88 |
+
- family-names: Thakur
|
| 89 |
+
given-names: Abhishek
|
| 90 |
+
- family-names: Tunstall
|
| 91 |
+
given-names: Lewis
|
| 92 |
+
- family-names: Patil
|
| 93 |
+
given-names: Suraj
|
| 94 |
+
- family-names: Drame
|
| 95 |
+
given-names: Mariama
|
| 96 |
+
- family-names: Chaumond
|
| 97 |
+
given-names: Julien
|
| 98 |
+
- family-names: Plu
|
| 99 |
+
given-names: Julien
|
| 100 |
+
- family-names: Davison
|
| 101 |
+
given-names: Joe
|
| 102 |
+
- family-names: Brandeis
|
| 103 |
+
given-names: Simon
|
| 104 |
+
- family-names: Sanh
|
| 105 |
+
given-names: Victor
|
| 106 |
+
- family-names: Le Scao
|
| 107 |
+
given-names: Teven
|
| 108 |
+
- family-names: Canwen Xu
|
| 109 |
+
given-names: Kevin
|
| 110 |
+
- family-names: Patry
|
| 111 |
+
given-names: Nicolas
|
| 112 |
+
- family-names: Liu
|
| 113 |
+
given-names: Steven
|
| 114 |
+
- family-names: McMillan-Major
|
| 115 |
+
given-names: Angelina
|
| 116 |
+
- family-names: Schmid
|
| 117 |
+
given-names: Philipp
|
| 118 |
+
- family-names: Gugger
|
| 119 |
+
given-names: Sylvain
|
| 120 |
+
- family-names: Raw
|
| 121 |
+
given-names: Nathan
|
| 122 |
+
- family-names: Lesage
|
| 123 |
+
given-names: Sylvain
|
| 124 |
+
- family-names: Lozhkov
|
| 125 |
+
given-names: Anton
|
| 126 |
+
- family-names: Carrigan
|
| 127 |
+
given-names: Matthew
|
| 128 |
+
- family-names: Matussière
|
| 129 |
+
given-names: Théo
|
| 130 |
+
- family-names: von Werra
|
| 131 |
+
given-names: Leandro
|
| 132 |
+
- family-names: Debut
|
| 133 |
+
given-names: Lysandre
|
| 134 |
+
- family-names: Bekman
|
| 135 |
+
given-names: Stas
|
| 136 |
+
- family-names: Delangue
|
| 137 |
+
given-names: Clément
|
| 138 |
+
collection-title: "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations"
|
| 139 |
+
collection-type: proceedings
|
| 140 |
+
month: 11
|
| 141 |
+
year: 2021
|
| 142 |
+
publisher:
|
| 143 |
+
name: "Association for Computational Linguistics"
|
| 144 |
+
url: "https://aclanthology.org/2021.emnlp-demo.21"
|
| 145 |
+
start: 175
|
| 146 |
+
end: 184
|
| 147 |
+
identifiers:
|
| 148 |
+
- type: other
|
| 149 |
+
value: "arXiv:2109.02846"
|
| 150 |
+
description: "The arXiv preprint of the paper"
|
testbed/huggingface__datasets/CODE_OF_CONDUCT.md
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Contributor Covenant Code of Conduct
|
| 2 |
+
|
| 3 |
+
## Our Pledge
|
| 4 |
+
|
| 5 |
+
We as members, contributors, and leaders pledge to make participation in our
|
| 6 |
+
community a harassment-free experience for everyone, regardless of age, body
|
| 7 |
+
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
| 8 |
+
identity and expression, level of experience, education, socio-economic status,
|
| 9 |
+
nationality, personal appearance, race, caste, color, religion, or sexual identity
|
| 10 |
+
and orientation.
|
| 11 |
+
|
| 12 |
+
We pledge to act and interact in ways that contribute to an open, welcoming,
|
| 13 |
+
diverse, inclusive, and healthy community.
|
| 14 |
+
|
| 15 |
+
## Our Standards
|
| 16 |
+
|
| 17 |
+
Examples of behavior that contributes to a positive environment for our
|
| 18 |
+
community include:
|
| 19 |
+
|
| 20 |
+
* Demonstrating empathy and kindness toward other people
|
| 21 |
+
* Being respectful of differing opinions, viewpoints, and experiences
|
| 22 |
+
* Giving and gracefully accepting constructive feedback
|
| 23 |
+
* Accepting responsibility and apologizing to those affected by our mistakes,
|
| 24 |
+
and learning from the experience
|
| 25 |
+
* Focusing on what is best not just for us as individuals, but for the
|
| 26 |
+
overall community
|
| 27 |
+
|
| 28 |
+
Examples of unacceptable behavior include:
|
| 29 |
+
|
| 30 |
+
* The use of sexualized language or imagery, and sexual attention or
|
| 31 |
+
advances of any kind
|
| 32 |
+
* Trolling, insulting or derogatory comments, and personal or political attacks
|
| 33 |
+
* Public or private harassment
|
| 34 |
+
* Publishing others' private information, such as a physical or email
|
| 35 |
+
address, without their explicit permission
|
| 36 |
+
* Other conduct which could reasonably be considered inappropriate in a
|
| 37 |
+
professional setting
|
| 38 |
+
|
| 39 |
+
## Enforcement Responsibilities
|
| 40 |
+
|
| 41 |
+
Community leaders are responsible for clarifying and enforcing our standards of
|
| 42 |
+
acceptable behavior and will take appropriate and fair corrective action in
|
| 43 |
+
response to any behavior that they deem inappropriate, threatening, offensive,
|
| 44 |
+
or harmful.
|
| 45 |
+
|
| 46 |
+
Community leaders have the right and responsibility to remove, edit, or reject
|
| 47 |
+
comments, commits, code, wiki edits, issues, and other contributions that are
|
| 48 |
+
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
| 49 |
+
decisions when appropriate.
|
| 50 |
+
|
| 51 |
+
## Scope
|
| 52 |
+
|
| 53 |
+
This Code of Conduct applies within all community spaces, and also applies when
|
| 54 |
+
an individual is officially representing the community in public spaces.
|
| 55 |
+
Examples of representing our community include using an official e-mail address,
|
| 56 |
+
posting via an official social media account, or acting as an appointed
|
| 57 |
+
representative at an online or offline event.
|
| 58 |
+
|
| 59 |
+
## Enforcement
|
| 60 |
+
|
| 61 |
+
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
| 62 |
+
reported to the community leaders responsible for enforcement at
|
| 63 |
+
feedback@huggingface.co.
|
| 64 |
+
All complaints will be reviewed and investigated promptly and fairly.
|
| 65 |
+
|
| 66 |
+
All community leaders are obligated to respect the privacy and security of the
|
| 67 |
+
reporter of any incident.
|
| 68 |
+
|
| 69 |
+
## Enforcement Guidelines
|
| 70 |
+
|
| 71 |
+
Community leaders will follow these Community Impact Guidelines in determining
|
| 72 |
+
the consequences for any action they deem in violation of this Code of Conduct:
|
| 73 |
+
|
| 74 |
+
### 1. Correction
|
| 75 |
+
|
| 76 |
+
**Community Impact**: Use of inappropriate language or other behavior deemed
|
| 77 |
+
unprofessional or unwelcome in the community.
|
| 78 |
+
|
| 79 |
+
**Consequence**: A private, written warning from community leaders, providing
|
| 80 |
+
clarity around the nature of the violation and an explanation of why the
|
| 81 |
+
behavior was inappropriate. A public apology may be requested.
|
| 82 |
+
|
| 83 |
+
### 2. Warning
|
| 84 |
+
|
| 85 |
+
**Community Impact**: A violation through a single incident or series
|
| 86 |
+
of actions.
|
| 87 |
+
|
| 88 |
+
**Consequence**: A warning with consequences for continued behavior. No
|
| 89 |
+
interaction with the people involved, including unsolicited interaction with
|
| 90 |
+
those enforcing the Code of Conduct, for a specified period of time. This
|
| 91 |
+
includes avoiding interactions in community spaces as well as external channels
|
| 92 |
+
like social media. Violating these terms may lead to a temporary or
|
| 93 |
+
permanent ban.
|
| 94 |
+
|
| 95 |
+
### 3. Temporary Ban
|
| 96 |
+
|
| 97 |
+
**Community Impact**: A serious violation of community standards, including
|
| 98 |
+
sustained inappropriate behavior.
|
| 99 |
+
|
| 100 |
+
**Consequence**: A temporary ban from any sort of interaction or public
|
| 101 |
+
communication with the community for a specified period of time. No public or
|
| 102 |
+
private interaction with the people involved, including unsolicited interaction
|
| 103 |
+
with those enforcing the Code of Conduct, is allowed during this period.
|
| 104 |
+
Violating these terms may lead to a permanent ban.
|
| 105 |
+
|
| 106 |
+
### 4. Permanent Ban
|
| 107 |
+
|
| 108 |
+
**Community Impact**: Demonstrating a pattern of violation of community
|
| 109 |
+
standards, including sustained inappropriate behavior, harassment of an
|
| 110 |
+
individual, or aggression toward or disparagement of classes of individuals.
|
| 111 |
+
|
| 112 |
+
**Consequence**: A permanent ban from any sort of public interaction within
|
| 113 |
+
the community.
|
| 114 |
+
|
| 115 |
+
## Attribution
|
| 116 |
+
|
| 117 |
+
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
| 118 |
+
version 2.0, available at
|
| 119 |
+
[https://www.contributor-covenant.org/version/2/0/code_of_conduct.html][v2.0].
|
| 120 |
+
|
| 121 |
+
Community Impact Guidelines were inspired by
|
| 122 |
+
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
|
| 123 |
+
|
| 124 |
+
For answers to common questions about this code of conduct, see the FAQ at
|
| 125 |
+
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available
|
| 126 |
+
at [https://www.contributor-covenant.org/translations][translations].
|
| 127 |
+
|
| 128 |
+
[homepage]: https://www.contributor-covenant.org
|
| 129 |
+
[v2.0]: https://www.contributor-covenant.org/version/2/0/code_of_conduct.html
|
| 130 |
+
[Mozilla CoC]: https://github.com/mozilla/diversity
|
| 131 |
+
[FAQ]: https://www.contributor-covenant.org/faq
|
| 132 |
+
[translations]: https://www.contributor-covenant.org/translations
|
testbed/huggingface__datasets/CONTRIBUTING.md
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# How to contribute to Datasets?
|
| 2 |
+
[](CODE_OF_CONDUCT.md)
|
| 3 |
+
|
| 4 |
+
Datasets is an open source project, so all contributions and suggestions are welcome.
|
| 5 |
+
|
| 6 |
+
You can contribute in many different ways: giving ideas, answering questions, reporting bugs, proposing enhancements,
|
| 7 |
+
improving the documentation, fixing bugs,...
|
| 8 |
+
|
| 9 |
+
Many thanks in advance to every contributor.
|
| 10 |
+
|
| 11 |
+
In order to facilitate healthy, constructive behavior in an open and inclusive community, we all respect and abide by
|
| 12 |
+
our [code of conduct](CODE_OF_CONDUCT.md).
|
| 13 |
+
|
| 14 |
+
## How to work on an open Issue?
|
| 15 |
+
You have the list of open Issues at: https://github.com/huggingface/datasets/issues
|
| 16 |
+
|
| 17 |
+
Some of them may have the label `help wanted`: that means that any contributor is welcomed!
|
| 18 |
+
|
| 19 |
+
If you would like to work on any of the open Issues:
|
| 20 |
+
|
| 21 |
+
1. Make sure it is not already assigned to someone else. You have the assignee (if any) on the top of the right column of the Issue page.
|
| 22 |
+
|
| 23 |
+
2. You can self-assign it by commenting on the Issue page with the keyword: `#self-assign`.
|
| 24 |
+
|
| 25 |
+
3. Work on your self-assigned issue and eventually create a Pull Request.
|
| 26 |
+
|
| 27 |
+
## How to create a Pull Request?
|
| 28 |
+
If you want to add a dataset see specific instructions in the section [*How to add a dataset*](#how-to-add-a-dataset).
|
| 29 |
+
|
| 30 |
+
1. Fork the [repository](https://github.com/huggingface/datasets) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account.
|
| 31 |
+
|
| 32 |
+
2. Clone your fork to your local disk, and add the base repository as a remote:
|
| 33 |
+
|
| 34 |
+
```bash
|
| 35 |
+
git clone git@github.com:<your Github handle>/datasets.git
|
| 36 |
+
cd datasets
|
| 37 |
+
git remote add upstream https://github.com/huggingface/datasets.git
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
3. Create a new branch to hold your development changes:
|
| 41 |
+
|
| 42 |
+
```bash
|
| 43 |
+
git checkout -b a-descriptive-name-for-my-changes
|
| 44 |
+
```
|
| 45 |
+
|
| 46 |
+
**do not** work on the `main` branch.
|
| 47 |
+
|
| 48 |
+
4. Set up a development environment by running the following command in a virtual environment:
|
| 49 |
+
|
| 50 |
+
```bash
|
| 51 |
+
pip install -e ".[dev]"
|
| 52 |
+
```
|
| 53 |
+
|
| 54 |
+
(If datasets was already installed in the virtual environment, remove
|
| 55 |
+
it with `pip uninstall datasets` before reinstalling it in editable
|
| 56 |
+
mode with the `-e` flag.)
|
| 57 |
+
|
| 58 |
+
5. Develop the features on your branch.
|
| 59 |
+
|
| 60 |
+
6. Format your code. Run `black` and `ruff` so that your newly added files look nice with the following command:
|
| 61 |
+
|
| 62 |
+
```bash
|
| 63 |
+
make style
|
| 64 |
+
```
|
| 65 |
+
|
| 66 |
+
7. _(Optional)_ You can also use [`pre-commit`](https://pre-commit.com/) to format your code automatically each time run `git commit`, instead of running `make style` manually.
|
| 67 |
+
To do this, install `pre-commit` via `pip install pre-commit` and then run `pre-commit install` in the project's root directory to set up the hooks.
|
| 68 |
+
Note that if any files were formatted by `pre-commit` hooks during committing, you have to run `git commit` again.
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
8. Once you're happy with your contribution, add your changed files and make a commit to record your changes locally:
|
| 72 |
+
|
| 73 |
+
```bash
|
| 74 |
+
git add -u
|
| 75 |
+
git commit
|
| 76 |
+
```
|
| 77 |
+
|
| 78 |
+
It is a good idea to sync your copy of the code with the original
|
| 79 |
+
repository regularly. This way you can quickly account for changes:
|
| 80 |
+
|
| 81 |
+
```bash
|
| 82 |
+
git fetch upstream
|
| 83 |
+
git rebase upstream/main
|
| 84 |
+
```
|
| 85 |
+
|
| 86 |
+
9. Once you are satisfied, push the changes to your fork repo using:
|
| 87 |
+
|
| 88 |
+
```bash
|
| 89 |
+
git push -u origin a-descriptive-name-for-my-changes
|
| 90 |
+
```
|
| 91 |
+
|
| 92 |
+
Go to the webpage of your fork on GitHub. Click on "Pull request" to send your changes to the project maintainers for review.
|
| 93 |
+
|
| 94 |
+
## How to add a dataset
|
| 95 |
+
|
| 96 |
+
You can share your dataset on https://huggingface.co/datasets directly using your account, see the documentation:
|
| 97 |
+
|
| 98 |
+
* [Create a dataset and upload files on the website](https://huggingface.co/docs/datasets/upload_dataset)
|
| 99 |
+
* [Advanced guide using the CLI](https://huggingface.co/docs/datasets/share)
|
| 100 |
+
|
| 101 |
+
## How to contribute to the dataset cards
|
| 102 |
+
|
| 103 |
+
Improving the documentation of datasets is an ever-increasing effort, and we invite users to contribute by sharing their insights with the community in the `README.md` dataset cards provided for each dataset.
|
| 104 |
+
|
| 105 |
+
If you see that a dataset card is missing information that you are in a position to provide (as an author of the dataset or as an experienced user), the best thing you can do is to open a Pull Request on the Hugging Face Hub. To do so, go to the "Files and versions" tab of the dataset page and edit the `README.md` file. We provide:
|
| 106 |
+
|
| 107 |
+
* a [template](https://github.com/huggingface/datasets/blob/main/templates/README.md)
|
| 108 |
+
* a [guide](https://github.com/huggingface/datasets/blob/main/templates/README_guide.md) describing what information should go into each of the paragraphs
|
| 109 |
+
* and if you need inspiration, we recommend looking through a [completed example](https://huggingface.co/datasets/eli5/blob/main/README.md)
|
| 110 |
+
|
| 111 |
+
If you are a **dataset author**... you know what to do, it is your dataset after all ;) ! We would especially appreciate if you could help us fill in information about the process of creating the dataset, and take a moment to reflect on its social impact and possible limitations if you haven't already done so in the dataset paper or in another data statement.
|
| 112 |
+
|
| 113 |
+
If you are a **user of a dataset**, the main source of information should be the dataset paper if it is available: we recommend pulling information from there into the relevant paragraphs of the template. We also eagerly welcome discussions on the [Considerations for Using the Data](https://github.com/huggingface/datasets/blob/main/templates/README_guide.md#considerations-for-using-the-data) based on existing scholarship or personal experience that would benefit the whole community.
|
| 114 |
+
|
| 115 |
+
Finally, if you want more information on the how and why of dataset cards, we strongly recommend reading the foundational works [Datasheets for Datasets](https://arxiv.org/abs/1803.09010) and [Data Statements for NLP](https://www.aclweb.org/anthology/Q18-1041/).
|
| 116 |
+
|
| 117 |
+
Thank you for your contribution!
|
| 118 |
+
|
| 119 |
+
## Code of conduct
|
| 120 |
+
|
| 121 |
+
This project adheres to the HuggingFace [code of conduct](CODE_OF_CONDUCT.md).
|
| 122 |
+
By participating, you are expected to abide by this code.
|
testbed/huggingface__datasets/LICENSE
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
Apache License
|
| 3 |
+
Version 2.0, January 2004
|
| 4 |
+
http://www.apache.org/licenses/
|
| 5 |
+
|
| 6 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 7 |
+
|
| 8 |
+
1. Definitions.
|
| 9 |
+
|
| 10 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 11 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 12 |
+
|
| 13 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 14 |
+
the copyright owner that is granting the License.
|
| 15 |
+
|
| 16 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 17 |
+
other entities that control, are controlled by, or are under common
|
| 18 |
+
control with that entity. For the purposes of this definition,
|
| 19 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 20 |
+
direction or management of such entity, whether by contract or
|
| 21 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 22 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 23 |
+
|
| 24 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 25 |
+
exercising permissions granted by this License.
|
| 26 |
+
|
| 27 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 28 |
+
including but not limited to software source code, documentation
|
| 29 |
+
source, and configuration files.
|
| 30 |
+
|
| 31 |
+
"Object" form shall mean any form resulting from mechanical
|
| 32 |
+
transformation or translation of a Source form, including but
|
| 33 |
+
not limited to compiled object code, generated documentation,
|
| 34 |
+
and conversions to other media types.
|
| 35 |
+
|
| 36 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 37 |
+
Object form, made available under the License, as indicated by a
|
| 38 |
+
copyright notice that is included in or attached to the work
|
| 39 |
+
(an example is provided in the Appendix below).
|
| 40 |
+
|
| 41 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 42 |
+
form, that is based on (or derived from) the Work and for which the
|
| 43 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 44 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 45 |
+
of this License, Derivative Works shall not include works that remain
|
| 46 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 47 |
+
the Work and Derivative Works thereof.
|
| 48 |
+
|
| 49 |
+
"Contribution" shall mean any work of authorship, including
|
| 50 |
+
the original version of the Work and any modifications or additions
|
| 51 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 52 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 53 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 54 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 55 |
+
means any form of electronic, verbal, or written communication sent
|
| 56 |
+
to the Licensor or its representatives, including but not limited to
|
| 57 |
+
communication on electronic mailing lists, source code control systems,
|
| 58 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 59 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 60 |
+
excluding communication that is conspicuously marked or otherwise
|
| 61 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 62 |
+
|
| 63 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 64 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 65 |
+
subsequently incorporated within the Work.
|
| 66 |
+
|
| 67 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 68 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 69 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 70 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 71 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 72 |
+
Work and such Derivative Works in Source or Object form.
|
| 73 |
+
|
| 74 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 75 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 76 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 77 |
+
(except as stated in this section) patent license to make, have made,
|
| 78 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 79 |
+
where such license applies only to those patent claims licensable
|
| 80 |
+
by such Contributor that are necessarily infringed by their
|
| 81 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 82 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 83 |
+
institute patent litigation against any entity (including a
|
| 84 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 85 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 86 |
+
or contributory patent infringement, then any patent licenses
|
| 87 |
+
granted to You under this License for that Work shall terminate
|
| 88 |
+
as of the date such litigation is filed.
|
| 89 |
+
|
| 90 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 91 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 92 |
+
modifications, and in Source or Object form, provided that You
|
| 93 |
+
meet the following conditions:
|
| 94 |
+
|
| 95 |
+
(a) You must give any other recipients of the Work or
|
| 96 |
+
Derivative Works a copy of this License; and
|
| 97 |
+
|
| 98 |
+
(b) You must cause any modified files to carry prominent notices
|
| 99 |
+
stating that You changed the files; and
|
| 100 |
+
|
| 101 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 102 |
+
that You distribute, all copyright, patent, trademark, and
|
| 103 |
+
attribution notices from the Source form of the Work,
|
| 104 |
+
excluding those notices that do not pertain to any part of
|
| 105 |
+
the Derivative Works; and
|
| 106 |
+
|
| 107 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 108 |
+
distribution, then any Derivative Works that You distribute must
|
| 109 |
+
include a readable copy of the attribution notices contained
|
| 110 |
+
within such NOTICE file, excluding those notices that do not
|
| 111 |
+
pertain to any part of the Derivative Works, in at least one
|
| 112 |
+
of the following places: within a NOTICE text file distributed
|
| 113 |
+
as part of the Derivative Works; within the Source form or
|
| 114 |
+
documentation, if provided along with the Derivative Works; or,
|
| 115 |
+
within a display generated by the Derivative Works, if and
|
| 116 |
+
wherever such third-party notices normally appear. The contents
|
| 117 |
+
of the NOTICE file are for informational purposes only and
|
| 118 |
+
do not modify the License. You may add Your own attribution
|
| 119 |
+
notices within Derivative Works that You distribute, alongside
|
| 120 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 121 |
+
that such additional attribution notices cannot be construed
|
| 122 |
+
as modifying the License.
|
| 123 |
+
|
| 124 |
+
You may add Your own copyright statement to Your modifications and
|
| 125 |
+
may provide additional or different license terms and conditions
|
| 126 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 127 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 128 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 129 |
+
the conditions stated in this License.
|
| 130 |
+
|
| 131 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 132 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 133 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 134 |
+
this License, without any additional terms or conditions.
|
| 135 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 136 |
+
the terms of any separate license agreement you may have executed
|
| 137 |
+
with Licensor regarding such Contributions.
|
| 138 |
+
|
| 139 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 140 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 141 |
+
except as required for reasonable and customary use in describing the
|
| 142 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 143 |
+
|
| 144 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 145 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 146 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 147 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 148 |
+
implied, including, without limitation, any warranties or conditions
|
| 149 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 150 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 151 |
+
appropriateness of using or redistributing the Work and assume any
|
| 152 |
+
risks associated with Your exercise of permissions under this License.
|
| 153 |
+
|
| 154 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 155 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 156 |
+
unless required by applicable law (such as deliberate and grossly
|
| 157 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 158 |
+
liable to You for damages, including any direct, indirect, special,
|
| 159 |
+
incidental, or consequential damages of any character arising as a
|
| 160 |
+
result of this License or out of the use or inability to use the
|
| 161 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 162 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 163 |
+
other commercial damages or losses), even if such Contributor
|
| 164 |
+
has been advised of the possibility of such damages.
|
| 165 |
+
|
| 166 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 167 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 168 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 169 |
+
or other liability obligations and/or rights consistent with this
|
| 170 |
+
License. However, in accepting such obligations, You may act only
|
| 171 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 172 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 173 |
+
defend, and hold each Contributor harmless for any liability
|
| 174 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 175 |
+
of your accepting any such warranty or additional liability.
|
| 176 |
+
|
| 177 |
+
END OF TERMS AND CONDITIONS
|
| 178 |
+
|
| 179 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 180 |
+
|
| 181 |
+
To apply the Apache License to your work, attach the following
|
| 182 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 183 |
+
replaced with your own identifying information. (Don't include
|
| 184 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 185 |
+
comment syntax for the file format. We also recommend that a
|
| 186 |
+
file or class name and description of purpose be included on the
|
| 187 |
+
same "printed page" as the copyright notice for easier
|
| 188 |
+
identification within third-party archives.
|
| 189 |
+
|
| 190 |
+
Copyright [yyyy] [name of copyright owner]
|
| 191 |
+
|
| 192 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 193 |
+
you may not use this file except in compliance with the License.
|
| 194 |
+
You may obtain a copy of the License at
|
| 195 |
+
|
| 196 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 197 |
+
|
| 198 |
+
Unless required by applicable law or agreed to in writing, software
|
| 199 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 200 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 201 |
+
See the License for the specific language governing permissions and
|
| 202 |
+
limitations under the License.
|
testbed/huggingface__datasets/Makefile
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.PHONY: quality style test
|
| 2 |
+
|
| 3 |
+
check_dirs := tests src benchmarks metrics utils
|
| 4 |
+
|
| 5 |
+
# Check that source code meets quality standards
|
| 6 |
+
|
| 7 |
+
quality:
|
| 8 |
+
ruff check $(check_dirs) setup.py # linter
|
| 9 |
+
ruff format --check $(check_dirs) setup.py # formatter
|
| 10 |
+
|
| 11 |
+
# Format source code automatically
|
| 12 |
+
|
| 13 |
+
style:
|
| 14 |
+
ruff check --fix $(check_dirs) setup.py # linter
|
| 15 |
+
ruff format $(check_dirs) setup.py # formatter
|
| 16 |
+
|
| 17 |
+
# Run tests for the library
|
| 18 |
+
|
| 19 |
+
test:
|
| 20 |
+
python -m pytest -n auto --dist=loadfile -s -v ./tests/
|
testbed/huggingface__datasets/README.md
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<p align="center">
|
| 2 |
+
<picture>
|
| 3 |
+
<source media="(prefers-color-scheme: dark)" srcset="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/datasets-logo-dark.svg">
|
| 4 |
+
<source media="(prefers-color-scheme: light)" srcset="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/datasets-logo-light.svg">
|
| 5 |
+
<img alt="Hugging Face Datasets Library" src="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/datasets-logo-light.svg" width="352" height="59" style="max-width: 100%;">
|
| 6 |
+
</picture>
|
| 7 |
+
<br/>
|
| 8 |
+
<br/>
|
| 9 |
+
</p>
|
| 10 |
+
|
| 11 |
+
<p align="center">
|
| 12 |
+
<a href="https://github.com/huggingface/datasets/actions/workflows/ci.yml?query=branch%3Amain">
|
| 13 |
+
<img alt="Build" src="https://github.com/huggingface/datasets/actions/workflows/ci.yml/badge.svg?branch=main">
|
| 14 |
+
</a>
|
| 15 |
+
<a href="https://github.com/huggingface/datasets/blob/main/LICENSE">
|
| 16 |
+
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/datasets.svg?color=blue">
|
| 17 |
+
</a>
|
| 18 |
+
<a href="https://huggingface.co/docs/datasets/index.html">
|
| 19 |
+
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/datasets/index.html.svg?down_color=red&down_message=offline&up_message=online">
|
| 20 |
+
</a>
|
| 21 |
+
<a href="https://github.com/huggingface/datasets/releases">
|
| 22 |
+
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/datasets.svg">
|
| 23 |
+
</a>
|
| 24 |
+
<a href="https://huggingface.co/datasets/">
|
| 25 |
+
<img alt="Number of datasets" src="https://img.shields.io/endpoint?url=https://huggingface.co/api/shields/datasets&color=brightgreen">
|
| 26 |
+
</a>
|
| 27 |
+
<a href="CODE_OF_CONDUCT.md">
|
| 28 |
+
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-2.0-4baaaa.svg">
|
| 29 |
+
</a>
|
| 30 |
+
<a href="https://zenodo.org/badge/latestdoi/250213286"><img src="https://zenodo.org/badge/250213286.svg" alt="DOI"></a>
|
| 31 |
+
</p>
|
| 32 |
+
|
| 33 |
+
🤗 Datasets is a lightweight library providing **two** main features:
|
| 34 |
+
|
| 35 |
+
- **one-line dataloaders for many public datasets**: one-liners to download and pre-process any of the major public datasets (image datasets, audio datasets, text datasets in 467 languages and dialects, etc.) provided on the [HuggingFace Datasets Hub](https://huggingface.co/datasets). With a simple command like `squad_dataset = load_dataset("squad")`, get any of these datasets ready to use in a dataloader for training/evaluating a ML model (Numpy/Pandas/PyTorch/TensorFlow/JAX),
|
| 36 |
+
- **efficient data pre-processing**: simple, fast and reproducible data pre-processing for the public datasets as well as your own local datasets in CSV, JSON, text, PNG, JPEG, WAV, MP3, Parquet, etc. With simple commands like `processed_dataset = dataset.map(process_example)`, efficiently prepare the dataset for inspection and ML model evaluation and training.
|
| 37 |
+
|
| 38 |
+
[🎓 **Documentation**](https://huggingface.co/docs/datasets/) [🔎 **Find a dataset in the Hub**](https://huggingface.co/datasets) [🌟 **Share a dataset on the Hub**](https://huggingface.co/docs/datasets/share)
|
| 39 |
+
|
| 40 |
+
<h3 align="center">
|
| 41 |
+
<a href="https://hf.co/course"><img src="https://raw.githubusercontent.com/huggingface/datasets/main/docs/source/imgs/course_banner.png"></a>
|
| 42 |
+
</h3>
|
| 43 |
+
|
| 44 |
+
🤗 Datasets is designed to let the community easily add and share new datasets.
|
| 45 |
+
|
| 46 |
+
🤗 Datasets has many additional interesting features:
|
| 47 |
+
|
| 48 |
+
- Thrive on large datasets: 🤗 Datasets naturally frees the user from RAM memory limitation, all datasets are memory-mapped using an efficient zero-serialization cost backend (Apache Arrow).
|
| 49 |
+
- Smart caching: never wait for your data to process several times.
|
| 50 |
+
- Lightweight and fast with a transparent and pythonic API (multi-processing/caching/memory-mapping).
|
| 51 |
+
- Built-in interoperability with NumPy, pandas, PyTorch, TensorFlow 2 and JAX.
|
| 52 |
+
- Native support for audio and image data.
|
| 53 |
+
- Enable streaming mode to save disk space and start iterating over the dataset immediately.
|
| 54 |
+
|
| 55 |
+
🤗 Datasets originated from a fork of the awesome [TensorFlow Datasets](https://github.com/tensorflow/datasets) and the HuggingFace team want to deeply thank the TensorFlow Datasets team for building this amazing library. More details on the differences between 🤗 Datasets and `tfds` can be found in the section [Main differences between 🤗 Datasets and `tfds`](#main-differences-between--datasets-and-tfds).
|
| 56 |
+
|
| 57 |
+
# Installation
|
| 58 |
+
|
| 59 |
+
## With pip
|
| 60 |
+
|
| 61 |
+
🤗 Datasets can be installed from PyPI and has to be installed in a virtual environment (venv or conda for instance)
|
| 62 |
+
|
| 63 |
+
```bash
|
| 64 |
+
pip install datasets
|
| 65 |
+
```
|
| 66 |
+
|
| 67 |
+
## With conda
|
| 68 |
+
|
| 69 |
+
🤗 Datasets can be installed using conda as follows:
|
| 70 |
+
|
| 71 |
+
```bash
|
| 72 |
+
conda install -c huggingface -c conda-forge datasets
|
| 73 |
+
```
|
| 74 |
+
|
| 75 |
+
Follow the installation pages of TensorFlow and PyTorch to see how to install them with conda.
|
| 76 |
+
|
| 77 |
+
For more details on installation, check the installation page in the documentation: https://huggingface.co/docs/datasets/installation
|
| 78 |
+
|
| 79 |
+
## Installation to use with PyTorch/TensorFlow/pandas
|
| 80 |
+
|
| 81 |
+
If you plan to use 🤗 Datasets with PyTorch (1.0+), TensorFlow (2.2+) or pandas, you should also install PyTorch, TensorFlow or pandas.
|
| 82 |
+
|
| 83 |
+
For more details on using the library with NumPy, pandas, PyTorch or TensorFlow, check the quick start page in the documentation: https://huggingface.co/docs/datasets/quickstart
|
| 84 |
+
|
| 85 |
+
# Usage
|
| 86 |
+
|
| 87 |
+
🤗 Datasets is made to be very simple to use - the API is centered around a single function, `datasets.load_dataset(dataset_name, **kwargs)`, that instantiates a dataset.
|
| 88 |
+
|
| 89 |
+
This library can be used for text/image/audio/etc. datasets. Here is an example to load a text dataset:
|
| 90 |
+
|
| 91 |
+
Here is a quick example:
|
| 92 |
+
|
| 93 |
+
```python
|
| 94 |
+
from datasets import load_dataset
|
| 95 |
+
|
| 96 |
+
# Print all the available datasets
|
| 97 |
+
from huggingface_hub import list_datasets
|
| 98 |
+
print([dataset.id for dataset in list_datasets()])
|
| 99 |
+
|
| 100 |
+
# Load a dataset and print the first example in the training set
|
| 101 |
+
squad_dataset = load_dataset('squad')
|
| 102 |
+
print(squad_dataset['train'][0])
|
| 103 |
+
|
| 104 |
+
# Process the dataset - add a column with the length of the context texts
|
| 105 |
+
dataset_with_length = squad_dataset.map(lambda x: {"length": len(x["context"])})
|
| 106 |
+
|
| 107 |
+
# Process the dataset - tokenize the context texts (using a tokenizer from the 🤗 Transformers library)
|
| 108 |
+
from transformers import AutoTokenizer
|
| 109 |
+
tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
|
| 110 |
+
|
| 111 |
+
tokenized_dataset = squad_dataset.map(lambda x: tokenizer(x['context']), batched=True)
|
| 112 |
+
```
|
| 113 |
+
|
| 114 |
+
If your dataset is bigger than your disk or if you don't want to wait to download the data, you can use streaming:
|
| 115 |
+
|
| 116 |
+
```python
|
| 117 |
+
# If you want to use the dataset immediately and efficiently stream the data as you iterate over the dataset
|
| 118 |
+
image_dataset = load_dataset('cifar100', streaming=True)
|
| 119 |
+
for example in image_dataset["train"]:
|
| 120 |
+
break
|
| 121 |
+
```
|
| 122 |
+
|
| 123 |
+
For more details on using the library, check the quick start page in the documentation: https://huggingface.co/docs/datasets/quickstart and the specific pages on:
|
| 124 |
+
|
| 125 |
+
- Loading a dataset: https://huggingface.co/docs/datasets/loading
|
| 126 |
+
- What's in a Dataset: https://huggingface.co/docs/datasets/access
|
| 127 |
+
- Processing data with 🤗 Datasets: https://huggingface.co/docs/datasets/process
|
| 128 |
+
- Processing audio data: https://huggingface.co/docs/datasets/audio_process
|
| 129 |
+
- Processing image data: https://huggingface.co/docs/datasets/image_process
|
| 130 |
+
- Processing text data: https://huggingface.co/docs/datasets/nlp_process
|
| 131 |
+
- Streaming a dataset: https://huggingface.co/docs/datasets/stream
|
| 132 |
+
- Writing your own dataset loading script: https://huggingface.co/docs/datasets/dataset_script
|
| 133 |
+
- etc.
|
| 134 |
+
|
| 135 |
+
# Add a new dataset to the Hub
|
| 136 |
+
|
| 137 |
+
We have a very detailed step-by-step guide to add a new dataset to the datasets already provided on the [HuggingFace Datasets Hub](https://huggingface.co/datasets).
|
| 138 |
+
|
| 139 |
+
You can find:
|
| 140 |
+
- [how to upload a dataset to the Hub using your web browser or Python](https://huggingface.co/docs/datasets/upload_dataset) and also
|
| 141 |
+
- [how to upload it using Git](https://huggingface.co/docs/datasets/share).
|
| 142 |
+
|
| 143 |
+
# Main differences between 🤗 Datasets and `tfds`
|
| 144 |
+
|
| 145 |
+
If you are familiar with the great TensorFlow Datasets, here are the main differences between 🤗 Datasets and `tfds`:
|
| 146 |
+
|
| 147 |
+
- the scripts in 🤗 Datasets are not provided within the library but are queried, downloaded/cached and dynamically loaded upon request
|
| 148 |
+
- the backend serialization of 🤗 Datasets is based on [Apache Arrow](https://arrow.apache.org/) instead of TF Records and leverage python dataclasses for info and features with some diverging features (we mostly don't do encoding and store the raw data as much as possible in the backend serialization cache).
|
| 149 |
+
- the user-facing dataset object of 🤗 Datasets is not a `tf.data.Dataset` but a built-in framework-agnostic dataset class with methods inspired by what we like in `tf.data` (like a `map()` method). It basically wraps a memory-mapped Arrow table cache.
|
| 150 |
+
|
| 151 |
+
# Disclaimers
|
| 152 |
+
|
| 153 |
+
🤗 Datasets may run Python code defined by the dataset authors to parse certain data formats or structures. For security reasons, we ask users to:
|
| 154 |
+
- check the dataset scripts they're going to run beforehand and
|
| 155 |
+
- pin the `revision` of the repositories they use.
|
| 156 |
+
|
| 157 |
+
If you're a dataset owner and wish to update any part of it (description, citation, license, etc.), or do not want your dataset to be included in the Hugging Face Hub, please get in touch by opening a discussion or a pull request in the Community tab of the dataset page. Thanks for your contribution to the ML community!
|
| 158 |
+
|
| 159 |
+
## BibTeX
|
| 160 |
+
|
| 161 |
+
If you want to cite our 🤗 Datasets library, you can use our [paper](https://arxiv.org/abs/2109.02846):
|
| 162 |
+
|
| 163 |
+
```bibtex
|
| 164 |
+
@inproceedings{lhoest-etal-2021-datasets,
|
| 165 |
+
title = "Datasets: A Community Library for Natural Language Processing",
|
| 166 |
+
author = "Lhoest, Quentin and
|
| 167 |
+
Villanova del Moral, Albert and
|
| 168 |
+
Jernite, Yacine and
|
| 169 |
+
Thakur, Abhishek and
|
| 170 |
+
von Platen, Patrick and
|
| 171 |
+
Patil, Suraj and
|
| 172 |
+
Chaumond, Julien and
|
| 173 |
+
Drame, Mariama and
|
| 174 |
+
Plu, Julien and
|
| 175 |
+
Tunstall, Lewis and
|
| 176 |
+
Davison, Joe and
|
| 177 |
+
{\v{S}}a{\v{s}}ko, Mario and
|
| 178 |
+
Chhablani, Gunjan and
|
| 179 |
+
Malik, Bhavitvya and
|
| 180 |
+
Brandeis, Simon and
|
| 181 |
+
Le Scao, Teven and
|
| 182 |
+
Sanh, Victor and
|
| 183 |
+
Xu, Canwen and
|
| 184 |
+
Patry, Nicolas and
|
| 185 |
+
McMillan-Major, Angelina and
|
| 186 |
+
Schmid, Philipp and
|
| 187 |
+
Gugger, Sylvain and
|
| 188 |
+
Delangue, Cl{\'e}ment and
|
| 189 |
+
Matussi{\`e}re, Th{\'e}o and
|
| 190 |
+
Debut, Lysandre and
|
| 191 |
+
Bekman, Stas and
|
| 192 |
+
Cistac, Pierric and
|
| 193 |
+
Goehringer, Thibault and
|
| 194 |
+
Mustar, Victor and
|
| 195 |
+
Lagunas, Fran{\c{c}}ois and
|
| 196 |
+
Rush, Alexander and
|
| 197 |
+
Wolf, Thomas",
|
| 198 |
+
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations",
|
| 199 |
+
month = nov,
|
| 200 |
+
year = "2021",
|
| 201 |
+
address = "Online and Punta Cana, Dominican Republic",
|
| 202 |
+
publisher = "Association for Computational Linguistics",
|
| 203 |
+
url = "https://aclanthology.org/2021.emnlp-demo.21",
|
| 204 |
+
pages = "175--184",
|
| 205 |
+
abstract = "The scale, variety, and quantity of publicly-available NLP datasets has grown rapidly as researchers propose new tasks, larger models, and novel benchmarks. Datasets is a community library for contemporary NLP designed to support this ecosystem. Datasets aims to standardize end-user interfaces, versioning, and documentation, while providing a lightweight front-end that behaves similarly for small datasets as for internet-scale corpora. The design of the library incorporates a distributed, community-driven approach to adding datasets and documenting usage. After a year of development, the library now includes more than 650 unique datasets, has more than 250 contributors, and has helped support a variety of novel cross-dataset research projects and shared tasks. The library is available at https://github.com/huggingface/datasets.",
|
| 206 |
+
eprint={2109.02846},
|
| 207 |
+
archivePrefix={arXiv},
|
| 208 |
+
primaryClass={cs.CL},
|
| 209 |
+
}
|
| 210 |
+
```
|
| 211 |
+
|
| 212 |
+
If you need to cite a specific version of our 🤗 Datasets library for reproducibility, you can use the corresponding version Zenodo DOI from this [list](https://zenodo.org/search?q=conceptrecid:%224817768%22&sort=-version&all_versions=True).
|
testbed/huggingface__datasets/SECURITY.md
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Security Policy
|
| 2 |
+
|
| 3 |
+
## Supported Versions
|
| 4 |
+
<!--
|
| 5 |
+
Use this section to tell people about which versions of your project are
|
| 6 |
+
currently being supported with security updates.
|
| 7 |
+
|
| 8 |
+
| Version | Supported |
|
| 9 |
+
| ------- | ------------------ |
|
| 10 |
+
| 5.1.x | :white_check_mark: |
|
| 11 |
+
| 5.0.x | :x: |
|
| 12 |
+
| 4.0.x | :white_check_mark: |
|
| 13 |
+
| < 4.0 | :x: |
|
| 14 |
+
-->
|
| 15 |
+
|
| 16 |
+
Each major version is currently being supported with security updates.
|
| 17 |
+
|
| 18 |
+
| Version | Supported |
|
| 19 |
+
|---------|--------------------|
|
| 20 |
+
| 1.x.x | :white_check_mark: |
|
| 21 |
+
| 2.x.x | :white_check_mark: |
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
## Reporting a Vulnerability
|
| 25 |
+
<!--
|
| 26 |
+
Use this section to tell people how to report a vulnerability.
|
| 27 |
+
|
| 28 |
+
Tell them where to go, how often they can expect to get an update on a
|
| 29 |
+
reported vulnerability, what to expect if the vulnerability is accepted or
|
| 30 |
+
declined, etc.
|
| 31 |
+
-->
|
| 32 |
+
|
| 33 |
+
To report a security vulnerability, please contact: security@huggingface.co
|
testbed/huggingface__datasets/additional-tests-requirements.txt
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
unbabel-comet>=1.0.0
|
| 2 |
+
git+https://github.com/google-research/bleurt.git
|
| 3 |
+
git+https://github.com/ns-moosavi/coval.git
|
| 4 |
+
git+https://github.com/hendrycks/math.git
|
testbed/huggingface__datasets/benchmarks/benchmark_array_xd.py
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import tempfile
|
| 4 |
+
|
| 5 |
+
import datasets
|
| 6 |
+
from datasets.arrow_writer import ArrowWriter
|
| 7 |
+
from datasets.features import Array2D
|
| 8 |
+
from utils import generate_examples, get_duration
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
SHAPE_TEST_1 = (30, 487)
|
| 12 |
+
SHAPE_TEST_2 = (36, 1024)
|
| 13 |
+
SPEED_TEST_SHAPE = (100, 100)
|
| 14 |
+
SPEED_TEST_N_EXAMPLES = 100
|
| 15 |
+
|
| 16 |
+
DEFAULT_FEATURES = datasets.Features(
|
| 17 |
+
{"text": Array2D(SHAPE_TEST_1, dtype="float32"), "image": Array2D(SHAPE_TEST_2, dtype="float32")}
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
|
| 21 |
+
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@get_duration
def write(my_features, dummy_data, tmp_dir):
    """Time encoding ``dummy_data`` with ``my_features`` and writing it to an Arrow file."""
    arrow_path = os.path.join(tmp_dir, "beta.arrow")
    with ArrowWriter(features=my_features, path=arrow_path) as writer:
        for _key, record in dummy_data:
            writer.write(my_features.encode_example(record))
        writer.finalize()
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@get_duration
def read_unformated(feats, tmp_dir):
    """Time a full row-by-row pass over the Arrow file with no output format set."""
    arrow_path = os.path.join(tmp_dir, "beta.arrow")
    dataset = datasets.Dataset.from_file(filename=arrow_path, info=datasets.DatasetInfo(features=feats))
    for _row in dataset:
        pass
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@get_duration
def read_formatted_as_numpy(feats, tmp_dir):
    """Time a full row-by-row pass with the dataset formatted as numpy."""
    arrow_path = os.path.join(tmp_dir, "beta.arrow")
    dataset = datasets.Dataset.from_file(filename=arrow_path, info=datasets.DatasetInfo(features=feats))
    dataset.set_format("numpy")
    for _row in dataset:
        pass
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@get_duration
def read_batch_unformated(feats, tmp_dir):
    """Time reading the whole file in slices of 10 rows, with no output format set."""
    batch_size = 10
    arrow_path = os.path.join(tmp_dir, "beta.arrow")
    dataset = datasets.Dataset.from_file(filename=arrow_path, info=datasets.DatasetInfo(features=feats))
    for start in range(0, len(dataset), batch_size):
        _ = dataset[start : start + batch_size]
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@get_duration
def read_batch_formatted_as_numpy(feats, tmp_dir):
    """Time reading the whole file in slices of 10 rows, formatted as numpy."""
    batch_size = 10
    arrow_path = os.path.join(tmp_dir, "beta.arrow")
    dataset = datasets.Dataset.from_file(filename=arrow_path, info=datasets.DatasetInfo(features=feats))
    dataset.set_format("numpy")
    for start in range(0, len(dataset), batch_size):
        _ = dataset[start : start + batch_size]
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
@get_duration
def read_col_unformated(feats, tmp_dir):
    """Time materializing each column in full, with no output format set."""
    arrow_path = os.path.join(tmp_dir, "beta.arrow")
    dataset = datasets.Dataset.from_file(filename=arrow_path, info=datasets.DatasetInfo(features=feats))
    for column_name in feats:
        _ = dataset[column_name]
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
@get_duration
def read_col_formatted_as_numpy(feats, tmp_dir):
    """Time materializing each column in full, formatted as numpy."""
    arrow_path = os.path.join(tmp_dir, "beta.arrow")
    dataset = datasets.Dataset.from_file(filename=arrow_path, info=datasets.DatasetInfo(features=feats))
    dataset.set_format("numpy")
    for column_name in feats:
        _ = dataset[column_name]
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def benchmark_array_xd():
    """Benchmark Arrow write/read speed of fixed-shape (Array2D) vs. variable-length
    (nested / flattened Sequence) image data, dumping timings as JSON to RESULTS_FILE_PATH.
    """
    read_functions = (
        read_unformated,
        read_formatted_as_numpy,
        read_batch_unformated,
        read_batch_formatted_as_numpy,
        read_col_unformated,
        read_col_formatted_as_numpy,
    )
    times = {}

    def run_suite(label, feats, data):
        # Write once into a fresh temp dir, then time every read strategy
        # against the same Arrow file.
        with tempfile.TemporaryDirectory() as tmp_dir:
            times["write_" + label] = write(feats, data, tmp_dir)
            for read_func in read_functions:
                times[read_func.__name__ + " after write_" + label] = read_func(feats, tmp_dir)

    feats = datasets.Features({"image": Array2D(SPEED_TEST_SHAPE, dtype="float32")})
    run_suite("array2d", feats, generate_examples(features=feats, num_examples=SPEED_TEST_N_EXAMPLES))

    # don't use fixed length for fair comparison
    # feats = datasets.Features(
    #     {"image": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), SPEED_TEST_SHAPE[1]), SPEED_TEST_SHAPE[0])}
    # )
    feats = datasets.Features({"image": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))})
    run_suite(
        "nested_sequence",
        feats,
        generate_examples(features=feats, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"image": SPEED_TEST_SHAPE}),
    )

    # don't use fixed length for fair comparison
    # feats = datasets.Features(
    #     {"image": datasets.Sequence(datasets.Value("float32"), SPEED_TEST_SHAPE[0] * SPEED_TEST_SHAPE[1])}
    # )
    feats = datasets.Features({"image": datasets.Sequence(datasets.Value("float32"))})
    run_suite(
        "flattened_sequence",
        feats,
        generate_examples(
            features=feats,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"image": [SPEED_TEST_SHAPE[0] * SPEED_TEST_SHAPE[1]]},
        ),
    )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
if __name__ == "__main__": # useful to run the profiler
|
| 142 |
+
benchmark_array_xd()
|
testbed/huggingface__datasets/benchmarks/benchmark_iterating.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import tempfile
|
| 4 |
+
|
| 5 |
+
import datasets
|
| 6 |
+
from utils import generate_example_dataset, get_duration
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
SPEED_TEST_N_EXAMPLES = 50_000
|
| 10 |
+
SMALL_TEST = 5_000
|
| 11 |
+
|
| 12 |
+
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
|
| 13 |
+
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@get_duration
def read(dataset: datasets.Dataset, length):
    """Time sequential single-row access over the first ``length`` rows."""
    for idx in range(length):
        _ = dataset[idx]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    """Time slice access in chunks of ``batch_size`` over the first ``length`` rows.

    Fix: ``length`` was previously accepted but ignored (the loop iterated over
    ``len(dataset)``), inconsistent with `read_formatted_batch`. All call sites
    pass ``length == len(dataset)``, so measured results are unchanged.
    """
    for i in range(0, length, batch_size):
        _ = dataset[i : i + batch_size]
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    """Time single-row access under the given output format (numpy/pandas/torch/...)."""
    with dataset.formatted_as(type=type):
        for idx in range(length):
            _ = dataset[idx]
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    """Time slice access in chunks of ``batch_size`` under the given output format."""
    with dataset.formatted_as(type=type):
        for start in range(0, length, batch_size):
            _ = dataset[start : start + batch_size]
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def benchmark_iterating():
    """Benchmark row/batch/formatted access speed on a generated dataset, before and
    after shuffling, dumping the timings as JSON to RESULTS_FILE_PATH.
    """
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    # Only a subset of scenarios is re-run after shuffling (indices-mapped access is slower).
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        # Fix: the original message was missing its closing parenthesis.
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
if __name__ == "__main__": # useful to run the profiler
|
| 98 |
+
benchmark_iterating()
|
testbed/huggingface__datasets/benchmarks/benchmark_map_filter.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import tempfile
|
| 4 |
+
|
| 5 |
+
import transformers
|
| 6 |
+
|
| 7 |
+
import datasets
|
| 8 |
+
from utils import generate_example_dataset, get_duration
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
SPEED_TEST_N_EXAMPLES = 500_000
|
| 12 |
+
|
| 13 |
+
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
|
| 14 |
+
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    """Time a single `Dataset.map` call with the given kwargs.

    NOTE: intentionally shadows the builtin ``map`` to keep benchmark keys readable.
    """
    dataset.map(**kwargs)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    """Time a single `Dataset.filter` call with the given kwargs.

    NOTE: intentionally shadows the builtin ``filter`` to keep benchmark keys readable.
    """
    dataset.filter(**kwargs)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def benchmark_map_filter():
    """Benchmark `Dataset.map` / `Dataset.filter` variants on a generated text dataset,
    dumping the timings as JSON to RESULTS_FILE_PATH.
    """
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        def noop(batch):
            # Returning None means map() applies no update: measures iteration overhead only.
            return None

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=noop, batched=True)

        # Same no-op map under each output format (columns restricted where the
        # format cannot hold strings).
        for fmt_type, columns, label in (
            ("numpy", None, "numpy"),
            ("pandas", None, "pandas"),
            ("torch", "numbers", "pytorch"),
            ("tensorflow", "numbers", "tensorflow"),
        ):
            with dataset.formatted_as(type=fmt_type, columns=columns):
                times["map no-op batched " + label] = map(dataset, function=noop, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
if __name__ == "__main__": # useful to run the profiler
|
| 71 |
+
benchmark_map_filter()
|
testbed/huggingface__datasets/benchmarks/format.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import sys
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def format_json_to_md(input_json_file, output_md_file):
    """Render a benchmark-results JSON file as a collapsible Markdown report.

    Args:
        input_json_file: path to a JSON file mapping benchmark name ->
            {metric name -> {"new": value, "old": value, "diff": value}},
            where "old" and "diff" are optional.
        output_md_file: path of the Markdown file to (over)write.
    """
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            # Bug fix: non-numeric values used to be appended as a bare "None"
            # with no separator (producing cells like "1.000000None"); keep the
            # " / " and "(...)" separators consistent for the None case too.
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else " None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else " / None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else " (None)"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
if __name__ == "__main__":
|
| 46 |
+
input_json_file = sys.argv[1]
|
| 47 |
+
output_md_file = sys.argv[2]
|
| 48 |
+
|
| 49 |
+
format_json_to_md(input_json_file, output_md_file)
|
testbed/huggingface__datasets/benchmarks/results/benchmark_array_xd.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"write_array2d": 0.14168284999323077, "read_unformated after write_array2d": 0.04353281999647152, "read_formatted_as_numpy after write_array2d": 0.1285462469968479, "read_batch_unformated after write_array2d": 0.023109222995117307, "read_batch_formatted_as_numpy after write_array2d": 0.011352884990628809, "read_col_unformated after write_array2d": 0.037052362007671036, "read_col_formatted_as_numpy after write_array2d": 0.007985618998645805, "write_nested_sequence": 1.4927163410029607, "read_unformated after write_nested_sequence": 0.28319963401008863, "read_formatted_as_numpy after write_nested_sequence": 0.419271487990045, "read_batch_unformated after write_nested_sequence": 0.3234798710036557, "read_batch_formatted_as_numpy after write_nested_sequence": 0.03850809299910907, "read_col_unformated after write_nested_sequence": 0.29384092400141526, "read_col_formatted_as_numpy after write_nested_sequence": 0.004250421989127062, "write_flattened_sequence": 1.4521546780015342, "read_unformated after write_flattened_sequence": 0.25513897799828555, "read_formatted_as_numpy after write_flattened_sequence": 0.07564631900459062, "read_batch_unformated after write_flattened_sequence": 0.2758980469952803, "read_batch_formatted_as_numpy after write_flattened_sequence": 0.011008214991306886, "read_col_unformated after write_flattened_sequence": 0.25848906899045687, "read_col_formatted_as_numpy after write_flattened_sequence": 0.004328447001171298}
|
testbed/huggingface__datasets/benchmarks/results/benchmark_getitem_100B.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"num examples": 100000000000, "get_first_row": 0.00019991099999927542, "get_last_row": 5.4411000000698095e-05, "get_batch_of_1024_rows": 0.0004897069999998394, "get_batch_of_1024_random_rows": 0.01800621099999944}
|
testbed/huggingface__datasets/benchmarks/results/benchmark_iterating.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"num examples": 50000, "read 5000": 0.2152090710005723, "read 50000": 2.077654693988734, "read_batch 50000 10": 1.5041199039987987, "read_batch 50000 100": 1.5411947140091797, "read_batch 50000 1000": 1.4684901159926085, "read_formatted numpy 5000": 4.584776938994764, "read_formatted pandas 5000": 3.7457121399929747, "read_formatted torch 5000": 4.565676491998602, "read_formatted tensorflow 5000": 5.269861594992108, "read_formatted_batch numpy 5000 10": 0.4242750950070331, "read_formatted_batch numpy 5000 1000": 0.007607111998368055, "shuffled read 5000": 0.22604441999283154, "shuffled read 50000": 2.268928524994408, "shuffled read_batch 50000 10": 55.44462437101174, "shuffled read_batch 50000 100": 6.876476717996411, "shuffled read_batch 50000 1000": 2.1420724369963864, "shuffled read_formatted numpy 5000": 4.8052272600034485, "shuffled read_formatted_batch numpy 5000 10": 6.500664097999106, "shuffled read_formatted_batch numpy 5000 1000": 0.0754691059992183}
|
testbed/huggingface__datasets/benchmarks/results/benchmark_map_filter.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"num examples": 500000, "map identity": 10.19139202599763, "map identity batched": 0.6804238399927272, "map no-op batched": 0.5342009569867514, "map no-op batched numpy": 0.5792830920108827, "map no-op batched pandas": 0.4343639040016569, "map no-op batched pytorch": 0.5403374370071106, "map no-op batched tensorflow": 1.3869360350072384, "map fast-tokenizer batched": 8.074308118986664, "filter": 1.841787679004483}
|
testbed/huggingface__datasets/benchmarks/utils.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import timeit
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
import datasets
|
| 6 |
+
from datasets.arrow_writer import ArrowWriter
|
| 7 |
+
from datasets.features.features import _ArrayXD
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def get_duration(func):
    """Decorator: make ``func`` return its elapsed wall-clock seconds instead of its result."""

    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)  # result discarded; only the duration matters
        return timeit.default_timer() - start

    # Preserve the name so benchmark result keys stay readable.
    wrapper.__name__ = func.__name__

    return wrapper
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    """Generate ``num_examples`` (key, example) pairs of random dummy data matching ``features``.

    Args:
        features: mapping of column name -> feature type (`_ArrayXD`, `datasets.Value`,
            or possibly-nested `datasets.Sequence`).
        num_examples: number of examples to generate.
        seq_shapes: mapping of column name -> shape; required for Sequence columns.

    Returns:
        list of ``(int key, dict example)`` pairs.

    Raises:
        TypeError: if a feature type is not one of the supported kinds.
        KeyError: if a Sequence column has no entry in ``seq_shapes``.
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        # Fix: the original iterated with enumerate() but never used the index.
        for k, v in features.items():
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature  # unwrap nested Sequences down to the leaf feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            else:
                # Fix: an unsupported feature type used to silently reuse the previous
                # column's data (or raise NameError on the first column).
                raise TypeError(f"Unsupported feature type for column {k!r}: {type(v).__name__}")
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write random dummy examples to an Arrow file and load them back as a Dataset.

    Args:
        dataset_path: filesystem path of the Arrow file to create.
        features: `datasets.Features` describing the columns to generate.
        num_examples: number of examples to generate and write.
        seq_shapes: mapping of column name -> shape for Sequence columns
            (forwarded to `generate_examples`).

    Returns:
        `datasets.Dataset` backed by the freshly written Arrow file.

    Raises:
        ValueError: if the writer reports a different number of examples than requested.
    """
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    # Idiom fix: `!=` instead of `not ... == ...`.
    if num_final_examples != num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
|
testbed/huggingface__datasets/dvc.yaml
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
stages:
|
| 2 |
+
benchmark_array_xd:
|
| 3 |
+
cmd: python ./benchmarks/benchmark_array_xd.py
|
| 4 |
+
deps:
|
| 5 |
+
- ./benchmarks/benchmark_array_xd.py
|
| 6 |
+
metrics:
|
| 7 |
+
- ./benchmarks/results/benchmark_array_xd.json:
|
| 8 |
+
cache: false
|
| 9 |
+
|
| 10 |
+
benchmark_indices_mapping:
|
| 11 |
+
cmd: python ./benchmarks/benchmark_indices_mapping.py
|
| 12 |
+
deps:
|
| 13 |
+
- ./benchmarks/benchmark_indices_mapping.py
|
| 14 |
+
metrics:
|
| 15 |
+
- ./benchmarks/results/benchmark_indices_mapping.json:
|
| 16 |
+
cache: false
|
| 17 |
+
|
| 18 |
+
benchmark_map_filter:
|
| 19 |
+
cmd: python ./benchmarks/benchmark_map_filter.py
|
| 20 |
+
deps:
|
| 21 |
+
- ./benchmarks/benchmark_map_filter.py
|
| 22 |
+
metrics:
|
| 23 |
+
- ./benchmarks/results/benchmark_map_filter.json:
|
| 24 |
+
cache: false
|
| 25 |
+
|
| 26 |
+
benchmark_iterating:
|
| 27 |
+
cmd: python ./benchmarks/benchmark_iterating.py
|
| 28 |
+
deps:
|
| 29 |
+
- ./benchmarks/benchmark_iterating.py
|
| 30 |
+
metrics:
|
| 31 |
+
- ./benchmarks/results/benchmark_iterating.json:
|
| 32 |
+
cache: false
|
| 33 |
+
|
| 34 |
+
benchmark_getitem_100B:
|
| 35 |
+
cmd: python ./benchmarks/benchmark_getitem_100B.py
|
| 36 |
+
deps:
|
| 37 |
+
- ./benchmarks/benchmark_getitem_100B.py
|
| 38 |
+
metrics:
|
| 39 |
+
- ./benchmarks/results/benchmark_getitem_100B.json:
|
| 40 |
+
cache: false
|
testbed/huggingface__datasets/metrics/accuracy/README.md
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Metric Card for Accuracy
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
## Metric Description
|
| 5 |
+
|
| 6 |
+
Accuracy is the proportion of correct predictions among the total number of cases processed. It can be computed with:
|
| 7 |
+
Accuracy = (TP + TN) / (TP + TN + FP + FN)
|
| 8 |
+
Where:
|
| 9 |
+
TP: True positive
|
| 10 |
+
TN: True negative
|
| 11 |
+
FP: False positive
|
| 12 |
+
FN: False negative
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
## How to Use
|
| 16 |
+
|
| 17 |
+
At minimum, this metric requires predictions and references as inputs.
|
| 18 |
+
|
| 19 |
+
```python
|
| 20 |
+
>>> accuracy_metric = datasets.load_metric("accuracy")
|
| 21 |
+
>>> results = accuracy_metric.compute(references=[0, 1], predictions=[0, 1])
|
| 22 |
+
>>> print(results)
|
| 23 |
+
{'accuracy': 1.0}
|
| 24 |
+
```
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
### Inputs
|
| 28 |
+
- **predictions** (`list` of `int`): Predicted labels.
|
| 29 |
+
- **references** (`list` of `int`): Ground truth labels.
|
| 30 |
+
- **normalize** (`boolean`): If set to False, returns the number of correctly classified samples. Otherwise, returns the fraction of correctly classified samples. Defaults to True.
|
| 31 |
+
- **sample_weight** (`list` of `float`): Sample weights. Defaults to None.
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
### Output Values
|
| 35 |
+
- **accuracy** (`float` or `int`): Accuracy score. Minimum possible value is 0. Maximum possible value is 1.0, or the number of examples input if `normalize` is set to `False`. A higher score means higher accuracy.
|
| 36 |
+
|
| 37 |
+
Output Example(s):
|
| 38 |
+
```python
|
| 39 |
+
{'accuracy': 1.0}
|
| 40 |
+
```
|
| 41 |
+
|
| 42 |
+
This metric outputs a dictionary, containing the accuracy score.
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
#### Values from Popular Papers
|
| 46 |
+
|
| 47 |
+
Top-1 or top-5 accuracy is often used to report performance on supervised classification tasks such as image classification (e.g. on [ImageNet](https://paperswithcode.com/sota/image-classification-on-imagenet)) or sentiment analysis (e.g. on [IMDB](https://paperswithcode.com/sota/text-classification-on-imdb)).
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
### Examples
|
| 51 |
+
|
| 52 |
+
Example 1-A simple example
|
| 53 |
+
```python
|
| 54 |
+
>>> accuracy_metric = datasets.load_metric("accuracy")
|
| 55 |
+
>>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0])
|
| 56 |
+
>>> print(results)
|
| 57 |
+
{'accuracy': 0.5}
|
| 58 |
+
```
|
| 59 |
+
|
| 60 |
+
Example 2-The same as Example 1, except with `normalize` set to `False`.
|
| 61 |
+
```python
|
| 62 |
+
>>> accuracy_metric = datasets.load_metric("accuracy")
|
| 63 |
+
>>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0], normalize=False)
|
| 64 |
+
>>> print(results)
|
| 65 |
+
{'accuracy': 3.0}
|
| 66 |
+
```
|
| 67 |
+
|
| 68 |
+
Example 3-The same as Example 1, except with `sample_weight` set.
|
| 69 |
+
```python
|
| 70 |
+
>>> accuracy_metric = datasets.load_metric("accuracy")
|
| 71 |
+
>>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0], sample_weight=[0.5, 2, 0.7, 0.5, 9, 0.4])
|
| 72 |
+
>>> print(results)
|
| 73 |
+
{'accuracy': 0.8778625954198473}
|
| 74 |
+
```
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
## Limitations and Bias
|
| 78 |
+
This metric can be easily misleading, especially in the case of unbalanced classes. For example, a high accuracy might be because a model is doing well, but if the data is unbalanced, it might also be because the model is only accurately labeling the high-frequency class. In such cases, a more detailed analysis of the model's behavior, or the use of a different metric entirely, is necessary to determine how well the model is actually performing.
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
## Citation(s)
|
| 82 |
+
```bibtex
|
| 83 |
+
@article{scikit-learn,
|
| 84 |
+
title={Scikit-learn: Machine Learning in {P}ython},
|
| 85 |
+
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
|
| 86 |
+
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
|
| 87 |
+
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
|
| 88 |
+
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
|
| 89 |
+
journal={Journal of Machine Learning Research},
|
| 90 |
+
volume={12},
|
| 91 |
+
pages={2825--2830},
|
| 92 |
+
year={2011}
|
| 93 |
+
}
|
| 94 |
+
```
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
## Further References
|
testbed/huggingface__datasets/metrics/bertscore/README.md
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Metric Card for BERT Score
|
| 2 |
+
|
| 3 |
+
## Metric description
|
| 4 |
+
|
| 5 |
+
BERTScore is an automatic evaluation metric for text generation that computes a similarity score for each token in the candidate sentence with each token in the reference sentence. It leverages the pre-trained contextual embeddings from [BERT](https://huggingface.co/bert-base-uncased) models and matches words in candidate and reference sentences by cosine similarity.
|
| 6 |
+
|
| 7 |
+
Moreover, BERTScore computes precision, recall, and F1 measure, which can be useful for evaluating different language generation tasks.
|
| 8 |
+
|
| 9 |
+
## How to use
|
| 10 |
+
|
| 11 |
+
BERTScore takes 3 mandatory arguments: `predictions` (a list of strings of candidate sentences), `references` (a list of strings or list of lists of strings of reference sentences) and either `lang` (a string of two letters indicating the language of the sentences, in [ISO 639-1 format](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes)) or `model_type` (a string specifying which model to use, according to the BERT specification). The default behavior of the metric is to use the suggested model for the target language when one is specified, otherwise to use the `model_type` indicated.
|
| 12 |
+
|
| 13 |
+
```python
|
| 14 |
+
from datasets import load_metric
|
| 15 |
+
bertscore = load_metric("bertscore")
|
| 16 |
+
predictions = ["hello there", "general kenobi"]
|
| 17 |
+
references = ["hello there", "general kenobi"]
|
| 18 |
+
results = bertscore.compute(predictions=predictions, references=references, lang="en")
|
| 19 |
+
```
|
| 20 |
+
|
| 21 |
+
BERTScore also accepts multiple optional arguments:
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
`num_layers` (int): The layer of representation to use. The default is the number of layers tuned on WMT16 correlation data, which depends on the `model_type` used.
|
| 25 |
+
|
| 26 |
+
`verbose` (bool): Turn on intermediate status update. The default value is `False`.
|
| 27 |
+
|
| 28 |
+
`idf` (bool or dict): Use idf weighting; can also be a precomputed idf_dict.
|
| 29 |
+
|
| 30 |
+
`device` (str): The device on which the contextual embedding model will be allocated. If this argument is `None`, the model lives on `cuda:0` if cuda is available.
|
| 31 |
+
|
| 32 |
+
`nthreads` (int): Number of threads used for computation. The default value is `4`.
|
| 33 |
+
|
| 34 |
+
`rescale_with_baseline` (bool): Rescale BERTScore with the pre-computed baseline. The default value is `False`.
|
| 35 |
+
|
| 36 |
+
`batch_size` (int): BERTScore processing batch size. Note that at least one of `model_type` or `lang` must be specified, and that `lang` needs to be specified when `rescale_with_baseline` is `True`.
|
| 37 |
+
|
| 38 |
+
`baseline_path` (str): Customized baseline file.
|
| 39 |
+
|
| 40 |
+
`use_fast_tokenizer` (bool): `use_fast` parameter passed to HF tokenizer. The default value is `False`.
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
## Output values
|
| 44 |
+
|
| 45 |
+
BERTScore outputs a dictionary with the following values:
|
| 46 |
+
|
| 47 |
+
`precision`: The [precision](https://huggingface.co/metrics/precision) for each sentence from the `predictions` + `references` lists, which ranges from 0.0 to 1.0.
|
| 48 |
+
|
| 49 |
+
`recall`: The [recall](https://huggingface.co/metrics/recall) for each sentence from the `predictions` + `references` lists, which ranges from 0.0 to 1.0.
|
| 50 |
+
|
| 51 |
+
`f1`: The [F1 score](https://huggingface.co/metrics/f1) for each sentence from the `predictions` + `references` lists, which ranges from 0.0 to 1.0.
|
| 52 |
+
|
| 53 |
+
`hashcode:` The hashcode of the library.
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
### Values from popular papers
|
| 57 |
+
The [original BERTScore paper](https://openreview.net/pdf?id=SkeHuCVFDr) reported average model selection accuracies (Hits@1) on WMT18 hybrid systems for different language pairs, which ranged from 0.004 for `en<->tr` to 0.824 for `en<->de`.
|
| 58 |
+
|
| 59 |
+
For more recent model performance, see the [metric leaderboard](https://paperswithcode.com/paper/bertscore-evaluating-text-generation-with).
|
| 60 |
+
|
| 61 |
+
## Examples
|
| 62 |
+
|
| 63 |
+
Maximal values with the `distilbert-base-uncased` model:
|
| 64 |
+
|
| 65 |
+
```python
|
| 66 |
+
from datasets import load_metric
|
| 67 |
+
bertscore = load_metric("bertscore")
|
| 68 |
+
predictions = ["hello world", "general kenobi"]
|
| 69 |
+
references = ["hello world", "general kenobi"]
|
| 70 |
+
results = bertscore.compute(predictions=predictions, references=references, model_type="distilbert-base-uncased")
|
| 71 |
+
print(results)
|
| 72 |
+
{'precision': [1.0, 1.0], 'recall': [1.0, 1.0], 'f1': [1.0, 1.0], 'hashcode': 'distilbert-base-uncased_L5_no-idf_version=0.3.10(hug_trans=4.10.3)'}
|
| 73 |
+
```
|
| 74 |
+
|
| 75 |
+
Partial match with the `bert-base-uncased` model:
|
| 76 |
+
|
| 77 |
+
```python
|
| 78 |
+
from datasets import load_metric
|
| 79 |
+
bertscore = load_metric("bertscore")
|
| 80 |
+
predictions = ["hello world", "general kenobi"]
|
| 81 |
+
references = ["goodnight moon", "the sun is shining"]
|
| 82 |
+
results = bertscore.compute(predictions=predictions, references=references, model_type="bert-base-uncased")
|
| 83 |
+
print(results)
|
| 84 |
+
{'precision': [0.7380737066268921, 0.5584042072296143], 'recall': [0.7380737066268921, 0.5889028906822205], 'f1': [0.7380737066268921, 0.5732481479644775], 'hashcode': 'bert-base-uncased_L5_no-idf_version=0.3.10(hug_trans=4.10.3)'}
|
| 85 |
+
```
|
| 86 |
+
|
| 87 |
+
## Limitations and bias
|
| 88 |
+
|
| 89 |
+
The [original BERTScore paper](https://openreview.net/pdf?id=SkeHuCVFDr) showed that BERTScore correlates well with human judgment on sentence-level and system-level evaluation, but this depends on the model and language pair selected.
|
| 90 |
+
|
| 91 |
+
Furthermore, not all languages are supported by the metric -- see the [BERTScore supported language list](https://github.com/google-research/bert/blob/master/multilingual.md#list-of-languages) for more information.
|
| 92 |
+
|
| 93 |
+
Finally, calculating the BERTScore metric involves downloading the BERT model that is used to compute the score-- the default model for `en`, `roberta-large`, takes over 1.4GB of storage space and downloading it can take a significant amount of time depending on the speed of your internet connection. If this is an issue, choose a smaller model; for instance `distilbert-base-uncased` is 268MB. A full list of compatible models can be found [here](https://docs.google.com/spreadsheets/d/1RKOVpselB98Nnh_EOC4A2BYn8_201tmPODpNWu4w7xI/edit#gid=0).
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
## Citation
|
| 97 |
+
|
| 98 |
+
```bibtex
|
| 99 |
+
@inproceedings{bert-score,
|
| 100 |
+
title={BERTScore: Evaluating Text Generation with BERT},
|
| 101 |
+
author={Tianyi Zhang* and Varsha Kishore* and Felix Wu* and Kilian Q. Weinberger and Yoav Artzi},
|
| 102 |
+
booktitle={International Conference on Learning Representations},
|
| 103 |
+
year={2020},
|
| 104 |
+
url={https://openreview.net/forum?id=SkeHuCVFDr}
|
| 105 |
+
}
|
| 106 |
+
```
|
| 107 |
+
|
| 108 |
+
## Further References
|
| 109 |
+
|
| 110 |
+
- [BERTScore Project README](https://github.com/Tiiiger/bert_score#readme)
|
| 111 |
+
- [BERTScore ICLR 2020 Poster Presentation](https://iclr.cc/virtual_2020/poster_SkeHuCVFDr.html)
|
testbed/huggingface__datasets/metrics/bleurt/bleurt.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BLEURT metric. """

import os

from bleurt import score  # From: git+https://github.com/google-research/bleurt.git

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
"""

_DESCRIPTION = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).

See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""

_KWARGS_DESCRIPTION = """
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    'scores': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
"""

# Mapping from config name to the official download URL of the corresponding
# BLEURT checkpoint archive.
CHECKPOINT_URLS = {
    "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
    "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
    "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
    "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
    "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
    "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
    "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
    "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
    "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
    "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    """Learned MT evaluation metric backed by a downloaded BLEURT checkpoint."""

    def _info(self):
        """Return the metric metadata (features, citation, reference URLs)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        """Resolve the configured checkpoint, download it and build the scorer.

        Falls back to ``bleurt-base-128`` for the "default" config. The config
        name is matched against CHECKPOINT_URLS case-insensitively (lowercase
        first, then uppercase) because legacy checkpoints use lowercase names
        while the BLEURT-20 family uses uppercase names.
        """
        # Replace the "default" placeholder with a concrete checkpoint name.
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        lowered = self.config_name.lower()
        uppered = self.config_name.upper()
        if lowered in CHECKPOINT_URLS:
            checkpoint_name = lowered
        elif uppered in CHECKPOINT_URLS:
            checkpoint_name = uppered
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # Download and extract the checkpoint, then set up the scorer on it.
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        """Score each prediction against its reference with the BLEURT model."""
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
|
testbed/huggingface__datasets/metrics/cer/cer.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Character Error Ratio (CER) metric. """

from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


# importlib.metadata is only in the stdlib from Python 3.8 onwards.
if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


# Sentences are joined with no separator before character-level comparison.
SENTENCE_DELIMITER = ""


if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        """Back-port of jiwer>=2.3.0's character splitting for older jiwer.

        Turns sentences into a flat list of characters, optionally inserting
        `sentence_delimiter` between consecutive sentences.
        """

        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            # A string becomes the list of its characters.
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            last_idx = len(inp) - 1
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                # Insert the delimiter between sentences (never after the last),
                # and only when a non-empty delimiter was configured.
                if self.sentence_delimiter not in (None, "") and sent_idx < last_idx:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    # jiwer >= 2.3.0 ships the equivalent transforms natively.
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )


_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcribtions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
    (float): the character error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    """Character Error Rate metric built on top of jiwer's edit-distance measures."""

    def _info(self):
        """Return the metric metadata (features, citation, reference URLs)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        """Compute CER = (S + D + I) / N over all prediction/reference pairs.

        With `concatenate_texts=True` all sentences are scored in a single
        jiwer call (edits can span sentence boundaries); otherwise each pair is
        scored separately and the edit counts are accumulated.
        """
        if concatenate_texts:
            # jiwer's "wer" is a CER here because cer_transform splits
            # the text into characters before alignment.
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for hypothesis, truth in zip(predictions, references):
            measures = jiwer.compute_measures(
                truth,
                hypothesis,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            # S + D + I errors over N = S + D + C reference characters.
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
|
testbed/huggingface__datasets/metrics/cer/test_cer.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

from cer import CER


cer = CER()


class TestCER(unittest.TestCase):
    """Unit tests for the Character Error Rate metric (CER = (S + D + I) / N)."""

    def test_cer_case_senstive(self):
        refs = ["White House"]
        preds = ["white house"]
        # S = 2, D = 0, I = 0, N = 11, CER = 2 / 11
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.1818181818) < 1e-6)

    def test_cer_whitespace(self):
        refs = ["were wolf"]
        preds = ["werewolf"]
        # S = 0, D = 0, I = 1, N = 9, CER = 1 / 9
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.1111111) < 1e-6)

        refs = ["werewolf"]
        preds = ["weae wolf"]
        # S = 1, D = 1, I = 0, N = 8, CER = 0.25
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.25) < 1e-6)

        # consecutive whitespaces case 1
        refs = ["were wolf"]
        preds = ["were wolf"]
        # S = 0, D = 0, I = 0, N = 9, CER = 0
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.0) < 1e-6)

        # consecutive whitespaces case 2
        refs = ["were wolf"]
        preds = ["were wolf"]
        # S = 0, D = 0, I = 0, N = 9, CER = 0
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.0) < 1e-6)

    def test_cer_sub(self):
        refs = ["werewolf"]
        preds = ["weaewolf"]
        # S = 1, D = 0, I = 0, N = 8, CER = 0.125
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.125) < 1e-6)

    def test_cer_del(self):
        refs = ["werewolf"]
        preds = ["wereawolf"]
        # S = 0, D = 1, I = 0, N = 8, CER = 0.125
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.125) < 1e-6)

    def test_cer_insert(self):
        refs = ["werewolf"]
        preds = ["wereolf"]
        # S = 0, D = 0, I = 1, N = 8, CER = 0.125
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.125) < 1e-6)

    def test_cer_equal(self):
        refs = ["werewolf"]
        char_error_rate = cer.compute(predictions=refs, references=refs)
        self.assertEqual(char_error_rate, 0.0)

    def test_cer_list_of_seqs(self):
        refs = ["werewolf", "I am your father"]
        char_error_rate = cer.compute(predictions=refs, references=refs)
        self.assertEqual(char_error_rate, 0.0)

        refs = ["werewolf", "I am your father", "doge"]
        preds = ["werxwolf", "I am your father", "doge"]
        # S = 1, D = 0, I = 0, N = 28, CER = 1 / 28
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.03571428) < 1e-6)

    def test_correlated_sentences(self):
        refs = ["My hovercraft", "is full of eels"]
        preds = ["My hovercraft is full", " of eels"]
        # S = 0, D = 0, I = 2, N = 28, CER = 2 / 28
        # whitespace at the front of " of eels" will be strip during preprocessing
        # so need to insert 2 whitespaces
        char_error_rate = cer.compute(predictions=preds, references=refs, concatenate_texts=True)
        self.assertTrue(abs(char_error_rate - 0.071428) < 1e-6)

    def test_cer_unicode(self):
        refs = ["我能吞下玻璃而不伤身体"]
        preds = [" 能吞虾玻璃而 不霜身体啦"]
        # S = 3, D = 2, I = 0, N = 11, CER = 5 / 11
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.4545454545) < 1e-6)

        refs = ["我能吞下玻璃", "而不伤身体"]
        preds = ["我 能 吞 下 玻 璃", "而不伤身体"]
        # S = 0, D = 5, I = 0, N = 11, CER = 5 / 11
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.454545454545) < 1e-6)

        refs = ["我能吞下玻璃而不伤身体"]
        # Identical prediction and reference must give a perfect score.
        # NOTE: this used `assertFalse(char_error_rate, 0.0)`, where the 0.0 is
        # silently treated as the failure *message*, so the assertion passed for
        # any falsy value; `assertEqual` checks the intended equality.
        char_error_rate = cer.compute(predictions=refs, references=refs)
        self.assertEqual(char_error_rate, 0.0)

    def test_cer_empty(self):
        refs = [""]
        preds = ["Hypothesis"]
        # An empty reference makes N = 0, so the rate is undefined.
        with self.assertRaises(ValueError):
            cer.compute(predictions=preds, references=refs)


if __name__ == "__main__":
    unittest.main()
|
testbed/huggingface__datasets/metrics/chrf/README.md
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Metric Card for chrF(++)
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
## Metric Description
|
| 5 |
+
ChrF and ChrF++ are two MT evaluation metrics that use the F-score statistic for character n-gram matches. ChrF++ additionally includes word n-grams, which correlate more strongly with direct assessment. We use the implementation that is already present in sacrebleu.
|
| 6 |
+
|
| 7 |
+
While this metric is included in sacreBLEU, the implementation here is slightly different from sacreBLEU in terms of the required input format. Here, the length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
|
| 8 |
+
|
| 9 |
+
See the [sacreBLEU README.md](https://github.com/mjpost/sacreBLEU#chrf--chrf) for more information.
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
## How to Use
|
| 13 |
+
At minimum, this metric requires a `list` of predictions and a `list` of `list`s of references:
|
| 14 |
+
```python
|
| 15 |
+
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
|
| 16 |
+
>>> reference = [["The relationship between dogs and cats is not exactly friendly.", ], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
|
| 17 |
+
>>> chrf = datasets.load_metric("chrf")
|
| 18 |
+
>>> results = chrf.compute(predictions=prediction, references=reference)
|
| 19 |
+
>>> print(results)
|
| 20 |
+
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
|
| 21 |
+
```
|
| 22 |
+
|
| 23 |
+
### Inputs
|
| 24 |
+
- **`predictions`** (`list` of `str`): The predicted sentences.
|
| 25 |
+
- **`references`** (`list` of `list` of `str`): The references. There should be one reference sub-list for each prediction sentence.
|
| 26 |
+
- **`char_order`** (`int`): Character n-gram order. Defaults to `6`.
|
| 27 |
+
- **`word_order`** (`int`): Word n-gram order. If equals to 2, the metric is referred to as chrF++. Defaults to `0`.
|
| 28 |
+
- **`beta`** (`int`): Determine the importance of recall w.r.t precision. Defaults to `2`.
|
| 29 |
+
- **`lowercase`** (`bool`): If `True`, enables case-insensitivity. Defaults to `False`.
|
| 30 |
+
- **`whitespace`** (`bool`): If `True`, include whitespaces when extracting character n-grams. Defaults to `False`.
|
| 31 |
+
- **`eps_smoothing`** (`bool`): If `True`, applies epsilon smoothing similar to reference chrF++.py, NLTK, and Moses implementations. If `False`, takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
### Output Values
|
| 36 |
+
The output is a dictionary containing the following fields:
|
| 37 |
+
- **`'score'`** (`float`): The chrF (chrF++) score.
|
| 38 |
+
- **`'char_order'`** (`int`): The character n-gram order.
|
| 39 |
+
- **`'word_order'`** (`int`): The word n-gram order. If equals to `2`, the metric is referred to as chrF++.
|
| 40 |
+
- **`'beta'`** (`int`): Determine the importance of recall w.r.t precision.
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
The output is formatted as below:
|
| 44 |
+
```python
|
| 45 |
+
{'score': 61.576379378113785, 'char_order': 6, 'word_order': 0, 'beta': 2}
|
| 46 |
+
```
|
| 47 |
+
|
| 48 |
+
The chrF(++) score can be any value between `0.0` and `100.0`, inclusive.
|
| 49 |
+
|
| 50 |
+
#### Values from Popular Papers
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
### Examples
|
| 54 |
+
A simple example of calculating chrF:
|
| 55 |
+
```python
|
| 56 |
+
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
|
| 57 |
+
>>> reference = [["The relationship between dogs and cats is not exactly friendly.", ], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
|
| 58 |
+
>>> chrf = datasets.load_metric("chrf")
|
| 59 |
+
>>> results = chrf.compute(predictions=prediction, references=reference)
|
| 60 |
+
>>> print(results)
|
| 61 |
+
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
|
| 62 |
+
```
|
| 63 |
+
|
| 64 |
+
The same example, but with the argument `word_order=2`, to calculate chrF++ instead of chrF:
|
| 65 |
+
```python
|
| 66 |
+
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
|
| 67 |
+
>>> reference = [["The relationship between dogs and cats is not exactly friendly.", ], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
|
| 68 |
+
>>> chrf = datasets.load_metric("chrf")
|
| 69 |
+
>>> results = chrf.compute(predictions=prediction,
|
| 70 |
+
... references=reference,
|
| 71 |
+
... word_order=2)
|
| 72 |
+
>>> print(results)
|
| 73 |
+
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
|
| 74 |
+
```
|
| 75 |
+
|
| 76 |
+
The same chrF++ example as above, but with `lowercase=True` to normalize all case:
|
| 77 |
+
```python
|
| 78 |
+
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
|
| 79 |
+
>>> reference = [["The relationship between dogs and cats is not exactly friendly.", ], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
|
| 80 |
+
>>> chrf = datasets.load_metric("chrf")
|
| 81 |
+
>>> results = chrf.compute(predictions=prediction,
|
| 82 |
+
... references=reference,
|
| 83 |
+
... word_order=2,
|
| 84 |
+
... lowercase=True)
|
| 85 |
+
>>> print(results)
|
| 86 |
+
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
|
| 87 |
+
```
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
## Limitations and Bias
|
| 91 |
+
- According to [Popović 2017](https://www.statmt.org/wmt17/pdf/WMT70.pdf), chrF+ (where `word_order=1`) and chrF++ (where `word_order=2`) produce scores that correlate better with human judgements than chrF (where `word_order=0`) does.
|
| 92 |
+
|
| 93 |
+
## Citation
|
| 94 |
+
```bibtex
|
| 95 |
+
@inproceedings{popovic-2015-chrf,
|
| 96 |
+
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
|
| 97 |
+
author = "Popovi{\'c}, Maja",
|
| 98 |
+
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
|
| 99 |
+
month = sep,
|
| 100 |
+
year = "2015",
|
| 101 |
+
address = "Lisbon, Portugal",
|
| 102 |
+
publisher = "Association for Computational Linguistics",
|
| 103 |
+
url = "https://aclanthology.org/W15-3049",
|
| 104 |
+
doi = "10.18653/v1/W15-3049",
|
| 105 |
+
pages = "392--395",
|
| 106 |
+
}
|
| 107 |
+
@inproceedings{popovic-2017-chrf,
|
| 108 |
+
title = "chr{F}++: words helping character n-grams",
|
| 109 |
+
author = "Popovi{\'c}, Maja",
|
| 110 |
+
booktitle = "Proceedings of the Second Conference on Machine Translation",
|
| 111 |
+
month = sep,
|
| 112 |
+
year = "2017",
|
| 113 |
+
address = "Copenhagen, Denmark",
|
| 114 |
+
publisher = "Association for Computational Linguistics",
|
| 115 |
+
url = "https://aclanthology.org/W17-4770",
|
| 116 |
+
doi = "10.18653/v1/W17-4770",
|
| 117 |
+
pages = "612--618",
|
| 118 |
+
}
|
| 119 |
+
@inproceedings{post-2018-call,
|
| 120 |
+
title = "A Call for Clarity in Reporting {BLEU} Scores",
|
| 121 |
+
author = "Post, Matt",
|
| 122 |
+
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
|
| 123 |
+
month = oct,
|
| 124 |
+
year = "2018",
|
| 125 |
+
address = "Belgium, Brussels",
|
| 126 |
+
publisher = "Association for Computational Linguistics",
|
| 127 |
+
url = "https://www.aclweb.org/anthology/W18-6319",
|
| 128 |
+
pages = "186--191",
|
| 129 |
+
}
|
| 130 |
+
```
|
| 131 |
+
|
| 132 |
+
## Further References
|
| 133 |
+
- See the [sacreBLEU README.md](https://github.com/mjpost/sacreBLEU#chrf--chrf) for more information on this implementation.
|
testbed/huggingface__datasets/metrics/chrf/chrf.py
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 The HuggingFace Datasets Authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
""" Chrf(++) metric as available in sacrebleu. """
|
| 15 |
+
import sacrebleu as scb
|
| 16 |
+
from packaging import version
|
| 17 |
+
from sacrebleu import CHRF
|
| 18 |
+
|
| 19 |
+
import datasets
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
_CITATION = """\
|
| 23 |
+
@inproceedings{popovic-2015-chrf,
|
| 24 |
+
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
|
| 25 |
+
author = "Popovi{\'c}, Maja",
|
| 26 |
+
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
|
| 27 |
+
month = sep,
|
| 28 |
+
year = "2015",
|
| 29 |
+
address = "Lisbon, Portugal",
|
| 30 |
+
publisher = "Association for Computational Linguistics",
|
| 31 |
+
url = "https://aclanthology.org/W15-3049",
|
| 32 |
+
doi = "10.18653/v1/W15-3049",
|
| 33 |
+
pages = "392--395",
|
| 34 |
+
}
|
| 35 |
+
@inproceedings{popovic-2017-chrf,
|
| 36 |
+
title = "chr{F}++: words helping character n-grams",
|
| 37 |
+
author = "Popovi{\'c}, Maja",
|
| 38 |
+
booktitle = "Proceedings of the Second Conference on Machine Translation",
|
| 39 |
+
month = sep,
|
| 40 |
+
year = "2017",
|
| 41 |
+
address = "Copenhagen, Denmark",
|
| 42 |
+
publisher = "Association for Computational Linguistics",
|
| 43 |
+
url = "https://aclanthology.org/W17-4770",
|
| 44 |
+
doi = "10.18653/v1/W17-4770",
|
| 45 |
+
pages = "612--618",
|
| 46 |
+
}
|
| 47 |
+
@inproceedings{post-2018-call,
|
| 48 |
+
title = "A Call for Clarity in Reporting {BLEU} Scores",
|
| 49 |
+
author = "Post, Matt",
|
| 50 |
+
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
|
| 51 |
+
month = oct,
|
| 52 |
+
year = "2018",
|
| 53 |
+
address = "Belgium, Brussels",
|
| 54 |
+
publisher = "Association for Computational Linguistics",
|
| 55 |
+
url = "https://www.aclweb.org/anthology/W18-6319",
|
| 56 |
+
pages = "186--191",
|
| 57 |
+
}
|
| 58 |
+
"""
|
| 59 |
+
|
| 60 |
+
_DESCRIPTION = """\
|
| 61 |
+
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
|
| 62 |
+
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
|
| 63 |
+
that is already present in sacrebleu.
|
| 64 |
+
|
| 65 |
+
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
|
| 66 |
+
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
|
| 67 |
+
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
|
| 68 |
+
|
| 69 |
+
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
|
| 70 |
+
"""
|
| 71 |
+
|
| 72 |
+
_KWARGS_DESCRIPTION = """
|
| 73 |
+
Produces ChrF(++) scores for hypotheses given reference translations.
|
| 74 |
+
|
| 75 |
+
Args:
|
| 76 |
+
predictions (list of str): The predicted sentences.
|
| 77 |
+
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
|
| 78 |
+
char_order (int): Character n-gram order. Defaults to `6`.
|
| 79 |
+
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
|
| 80 |
+
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
|
| 81 |
+
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
|
| 82 |
+
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
|
| 83 |
+
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
|
| 84 |
+
to reference chrF++.py, NLTK and Moses implementations. If `False`,
|
| 85 |
+
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
|
| 86 |
+
|
| 87 |
+
Returns:
|
| 88 |
+
'score' (float): The chrF (chrF++) score,
|
| 89 |
+
'char_order' (int): The character n-gram order,
|
| 90 |
+
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
|
| 91 |
+
'beta' (int): Determine the importance of recall w.r.t precision
|
| 92 |
+
|
| 93 |
+
Examples:
|
| 94 |
+
Example 1--a simple example of calculating chrF:
|
| 95 |
+
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
|
| 96 |
+
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
|
| 97 |
+
>>> chrf = datasets.load_metric("chrf")
|
| 98 |
+
>>> results = chrf.compute(predictions=prediction, references=reference)
|
| 99 |
+
>>> print(results)
|
| 100 |
+
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
|
| 101 |
+
|
| 102 |
+
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
|
| 103 |
+
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
|
| 104 |
+
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
|
| 105 |
+
>>> chrf = datasets.load_metric("chrf")
|
| 106 |
+
>>> results = chrf.compute(predictions=prediction,
|
| 107 |
+
... references=reference,
|
| 108 |
+
... word_order=2)
|
| 109 |
+
>>> print(results)
|
| 110 |
+
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
|
| 111 |
+
|
| 112 |
+
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
|
| 113 |
+
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
|
| 114 |
+
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
|
| 115 |
+
>>> chrf = datasets.load_metric("chrf")
|
| 116 |
+
>>> results = chrf.compute(predictions=prediction,
|
| 117 |
+
... references=reference,
|
| 118 |
+
... word_order=2,
|
| 119 |
+
... lowercase=True)
|
| 120 |
+
>>> print(results)
|
| 121 |
+
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
|
| 122 |
+
"""
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
|
| 126 |
+
class ChrF(datasets.Metric):
|
| 127 |
+
def _info(self):
|
| 128 |
+
if version.parse(scb.__version__) < version.parse("1.4.12"):
|
| 129 |
+
raise ImportWarning(
|
| 130 |
+
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
|
| 131 |
+
'You can install it with `pip install "sacrebleu>=1.4.12"`.'
|
| 132 |
+
)
|
| 133 |
+
return datasets.MetricInfo(
|
| 134 |
+
description=_DESCRIPTION,
|
| 135 |
+
citation=_CITATION,
|
| 136 |
+
homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
|
| 137 |
+
inputs_description=_KWARGS_DESCRIPTION,
|
| 138 |
+
features=datasets.Features(
|
| 139 |
+
{
|
| 140 |
+
"predictions": datasets.Value("string", id="sequence"),
|
| 141 |
+
"references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
|
| 142 |
+
}
|
| 143 |
+
),
|
| 144 |
+
codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
|
| 145 |
+
reference_urls=[
|
| 146 |
+
"https://github.com/m-popovic/chrF",
|
| 147 |
+
],
|
| 148 |
+
)
|
| 149 |
+
|
| 150 |
+
def _compute(
|
| 151 |
+
self,
|
| 152 |
+
predictions,
|
| 153 |
+
references,
|
| 154 |
+
char_order: int = CHRF.CHAR_ORDER,
|
| 155 |
+
word_order: int = CHRF.WORD_ORDER,
|
| 156 |
+
beta: int = CHRF.BETA,
|
| 157 |
+
lowercase: bool = False,
|
| 158 |
+
whitespace: bool = False,
|
| 159 |
+
eps_smoothing: bool = False,
|
| 160 |
+
):
|
| 161 |
+
references_per_prediction = len(references[0])
|
| 162 |
+
if any(len(refs) != references_per_prediction for refs in references):
|
| 163 |
+
raise ValueError("Sacrebleu requires the same number of references for each prediction")
|
| 164 |
+
transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
|
| 165 |
+
|
| 166 |
+
sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
|
| 167 |
+
output = sb_chrf.corpus_score(predictions, transformed_references)
|
| 168 |
+
|
| 169 |
+
return {
|
| 170 |
+
"score": output.score,
|
| 171 |
+
"char_order": output.char_order,
|
| 172 |
+
"word_order": output.word_order,
|
| 173 |
+
"beta": output.beta,
|
| 174 |
+
}
|
testbed/huggingface__datasets/metrics/code_eval/README.md
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Metric Card for Code Eval
|
| 2 |
+
|
| 3 |
+
## Metric description
|
| 4 |
+
|
| 5 |
+
The CodeEval metric estimates the pass@k metric for code synthesis.
|
| 6 |
+
|
| 7 |
+
It implements the evaluation harness for the HumanEval problem solving dataset described in the paper ["Evaluating Large Language Models Trained on Code"](https://arxiv.org/abs/2107.03374).
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
## How to use
|
| 11 |
+
|
| 12 |
+
The Code Eval metric calculates how good are predictions given a set of references. Its arguments are:
|
| 13 |
+
|
| 14 |
+
`predictions`: a list of candidates to evaluate. Each candidate should be a list of strings with several code candidates to solve the problem.
|
| 15 |
+
|
| 16 |
+
`references`: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate.
|
| 17 |
+
|
| 18 |
+
`k`: number of code candidates to consider in the evaluation. The default value is `[1, 10, 100]`.
|
| 19 |
+
|
| 20 |
+
`num_workers`: the number of workers used to evaluate the candidate programs (The default value is `4`).
|
| 21 |
+
|
| 22 |
+
`timeout`: The maximum time taken to produce a prediction before it is considered a "timeout". The default value is `3.0` (i.e. 3 seconds).
|
| 23 |
+
|
| 24 |
+
```python
|
| 25 |
+
from datasets import load_metric
|
| 26 |
+
code_eval = load_metric("code_eval")
|
| 27 |
+
test_cases = ["assert add(2,3)==5"]
|
| 28 |
+
candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
|
| 29 |
+
pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
|
| 30 |
+
```
|
| 31 |
+
|
| 32 |
+
N.B.
|
| 33 |
+
This metric exists to run untrusted model-generated code. Users are strongly encouraged not to do so outside of a robust security sandbox. Before running this metric and once you've taken the necessary precautions, you will need to set the `HF_ALLOW_CODE_EVAL` environment variable. Use it at your own risk:
|
| 34 |
+
```python
|
| 35 |
+
import os
|
| 36 |
+
os.environ["HF_ALLOW_CODE_EVAL"] = "1"`
|
| 37 |
+
```
|
| 38 |
+
|
| 39 |
+
## Output values
|
| 40 |
+
|
| 41 |
+
The Code Eval metric outputs two things:
|
| 42 |
+
|
| 43 |
+
`pass_at_k`: a dictionary with the pass rates for each k value defined in the arguments.
|
| 44 |
+
|
| 45 |
+
`results`: a dictionary with granular results of each unit test.
|
| 46 |
+
|
| 47 |
+
### Values from popular papers
|
| 48 |
+
The [original CODEX paper](https://arxiv.org/pdf/2107.03374.pdf) reported that the CODEX-12B model had a pass@k score of 28.8% at `k=1`, 46.8% at `k=10` and 72.3% at `k=100`. However, since the CODEX model is not open source, it is hard to verify these numbers.
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
## Examples
|
| 53 |
+
|
| 54 |
+
Full match at `k=1`:
|
| 55 |
+
|
| 56 |
+
```python
|
| 57 |
+
from datasets import load_metric
|
| 58 |
+
code_eval = load_metric("code_eval")
|
| 59 |
+
test_cases = ["assert add(2,3)==5"]
|
| 60 |
+
candidates = [["def add(a, b): return a+b"]]
|
| 61 |
+
pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1])
|
| 62 |
+
print(pass_at_k)
|
| 63 |
+
{'pass@1': 1.0}
|
| 64 |
+
```
|
| 65 |
+
|
| 66 |
+
No match for k = 1:
|
| 67 |
+
|
| 68 |
+
```python
|
| 69 |
+
from datasets import load_metric
|
| 70 |
+
code_eval = load_metric("code_eval")
|
| 71 |
+
test_cases = ["assert add(2,3)==5"]
|
| 72 |
+
candidates = [["def add(a,b): return a*b"]]
|
| 73 |
+
pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1])
|
| 74 |
+
print(pass_at_k)
|
| 75 |
+
{'pass@1': 0.0}
|
| 76 |
+
```
|
| 77 |
+
|
| 78 |
+
Partial match at k=1, full match at k=2:
|
| 79 |
+
|
| 80 |
+
```python
|
| 81 |
+
from datasets import load_metric
|
| 82 |
+
code_eval = load_metric("code_eval")
|
| 83 |
+
test_cases = ["assert add(2,3)==5"]
|
| 84 |
+
candidates = [["def add(a, b): return a+b", "def add(a,b): return a*b"]]
|
| 85 |
+
pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
|
| 86 |
+
print(pass_at_k)
|
| 87 |
+
{'pass@1': 0.5, 'pass@2': 1.0}
|
| 88 |
+
```
|
| 89 |
+
|
| 90 |
+
## Limitations and bias
|
| 91 |
+
|
| 92 |
+
As per the warning included in the metric code itself:
|
| 93 |
+
> This program exists to execute untrusted model-generated code. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the accompanying paper. Once you have read this disclaimer and taken appropriate precautions, uncomment the following line and proceed at your own risk:
|
| 94 |
+
|
| 95 |
+
More information about the limitations of the code can be found on the [Human Eval Github repository](https://github.com/openai/human-eval).
|
| 96 |
+
|
| 97 |
+
## Citation
|
| 98 |
+
|
| 99 |
+
```bibtex
|
| 100 |
+
@misc{chen2021evaluating,
|
| 101 |
+
title={Evaluating Large Language Models Trained on Code},
|
| 102 |
+
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
|
| 103 |
+
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
|
| 104 |
+
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
|
| 105 |
+
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
|
| 106 |
+
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
|
| 107 |
+
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
|
| 108 |
+
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
|
| 109 |
+
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
|
| 110 |
+
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
|
| 111 |
+
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
|
| 112 |
+
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
|
| 113 |
+
and William Saunders and Christopher Hesse and Andrew N. Carr \
|
| 114 |
+
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
|
| 115 |
+
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
|
| 116 |
+
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
|
| 117 |
+
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
|
| 118 |
+
year={2021},
|
| 119 |
+
eprint={2107.03374},
|
| 120 |
+
archivePrefix={arXiv},
|
| 121 |
+
primaryClass={cs.LG}
|
| 122 |
+
}
|
| 123 |
+
```
|
| 124 |
+
|
| 125 |
+
## Further References
|
| 126 |
+
|
| 127 |
+
- [Human Eval Github repository](https://github.com/openai/human-eval)
|
| 128 |
+
- [OpenAI Codex website](https://openai.com/blog/openai-codex/)
|
testbed/huggingface__datasets/metrics/code_eval/code_eval.py
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""The CodeEval metric estimates the pass@k metric for code synthesis.
|
| 15 |
+
This is an evaluation harness for the HumanEval problem solving dataset
|
| 16 |
+
described in the paper "Evaluating Large Language Models Trained on Code"
|
| 17 |
+
(https://arxiv.org/abs/2107.03374)."""
|
| 18 |
+
|
| 19 |
+
import itertools
|
| 20 |
+
import os
|
| 21 |
+
from collections import Counter, defaultdict
|
| 22 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 23 |
+
|
| 24 |
+
import numpy as np
|
| 25 |
+
|
| 26 |
+
import datasets
|
| 27 |
+
|
| 28 |
+
from .execute import check_correctness
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
_CITATION = """\
|
| 32 |
+
@misc{chen2021evaluating,
|
| 33 |
+
title={Evaluating Large Language Models Trained on Code},
|
| 34 |
+
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
|
| 35 |
+
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
|
| 36 |
+
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
|
| 37 |
+
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
|
| 38 |
+
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
|
| 39 |
+
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
|
| 40 |
+
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
|
| 41 |
+
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
|
| 42 |
+
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
|
| 43 |
+
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
|
| 44 |
+
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
|
| 45 |
+
and William Saunders and Christopher Hesse and Andrew N. Carr \
|
| 46 |
+
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
|
| 47 |
+
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
|
| 48 |
+
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
|
| 49 |
+
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
|
| 50 |
+
year={2021},
|
| 51 |
+
eprint={2107.03374},
|
| 52 |
+
archivePrefix={arXiv},
|
| 53 |
+
primaryClass={cs.LG}
|
| 54 |
+
}
|
| 55 |
+
"""
|
| 56 |
+
|
| 57 |
+
_DESCRIPTION = """\
|
| 58 |
+
This metric implements the evaluation harness for the HumanEval problem solving dataset
|
| 59 |
+
described in the paper "Evaluating Large Language Models Trained on Code"
|
| 60 |
+
(https://arxiv.org/abs/2107.03374).
|
| 61 |
+
"""
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
_KWARGS_DESCRIPTION = """
|
| 65 |
+
Calculates how good are predictions given some references, using certain scores
|
| 66 |
+
Args:
|
| 67 |
+
predictions: list of candidates to evaluate. Each candidates should be a list
|
| 68 |
+
of strings with several code candidates to solve the problem.
|
| 69 |
+
references: a list with a test for each prediction. Each test should evaluate the
|
| 70 |
+
correctness of a code candidate.
|
| 71 |
+
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
|
| 72 |
+
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
|
| 73 |
+
timeout:
|
| 74 |
+
Returns:
|
| 75 |
+
pass_at_k: dict with pass rates for each k
|
| 76 |
+
results: dict with granular results of each unittest
|
| 77 |
+
Examples:
|
| 78 |
+
>>> code_eval = datasets.load_metric("code_eval")
|
| 79 |
+
>>> test_cases = ["assert add(2,3)==5"]
|
| 80 |
+
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
|
| 81 |
+
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
|
| 82 |
+
>>> print(pass_at_k)
|
| 83 |
+
{'pass@1': 0.5, 'pass@2': 1.0}
|
| 84 |
+
"""
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
_WARNING = """
|
| 88 |
+
################################################################################
|
| 89 |
+
!!!WARNING!!!
|
| 90 |
+
################################################################################
|
| 91 |
+
The "code_eval" metric executes untrusted model-generated code in Python.
|
| 92 |
+
Although it is highly unlikely that model-generated code will do something
|
| 93 |
+
overtly malicious in response to this test suite, model-generated code may act
|
| 94 |
+
destructively due to a lack of model capability or alignment.
|
| 95 |
+
Users are strongly encouraged to sandbox this evaluation suite so that it
|
| 96 |
+
does not perform destructive actions on their host or network. For more
|
| 97 |
+
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
|
| 98 |
+
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
|
| 99 |
+
|
| 100 |
+
Once you have read this disclaimer and taken appropriate precautions,
|
| 101 |
+
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
|
| 102 |
+
with:
|
| 103 |
+
|
| 104 |
+
>>> import os
|
| 105 |
+
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
|
| 106 |
+
|
| 107 |
+
################################################################################\
|
| 108 |
+
"""
|
| 109 |
+
|
| 110 |
+
_LICENSE = """The MIT License
|
| 111 |
+
|
| 112 |
+
Copyright (c) OpenAI (https://openai.com)
|
| 113 |
+
|
| 114 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 115 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 116 |
+
in the Software without restriction, including without limitation the rights
|
| 117 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 118 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 119 |
+
furnished to do so, subject to the following conditions:
|
| 120 |
+
|
| 121 |
+
The above copyright notice and this permission notice shall be included in
|
| 122 |
+
all copies or substantial portions of the Software.
|
| 123 |
+
|
| 124 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 125 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 126 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 127 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 128 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 129 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
| 130 |
+
THE SOFTWARE."""
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
|
| 134 |
+
class CodeEval(datasets.Metric):
|
| 135 |
+
def _info(self):
|
| 136 |
+
return datasets.MetricInfo(
|
| 137 |
+
# This is the description that will appear on the metrics page.
|
| 138 |
+
description=_DESCRIPTION,
|
| 139 |
+
citation=_CITATION,
|
| 140 |
+
inputs_description=_KWARGS_DESCRIPTION,
|
| 141 |
+
# This defines the format of each prediction and reference
|
| 142 |
+
features=datasets.Features(
|
| 143 |
+
{
|
| 144 |
+
"predictions": datasets.Sequence(datasets.Value("string")),
|
| 145 |
+
"references": datasets.Value("string"),
|
| 146 |
+
}
|
| 147 |
+
),
|
| 148 |
+
homepage="https://github.com/openai/human-eval",
|
| 149 |
+
codebase_urls=["https://github.com/openai/human-eval"],
|
| 150 |
+
reference_urls=["https://github.com/openai/human-eval"],
|
| 151 |
+
license=_LICENSE,
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
|
| 155 |
+
"""Returns the scores"""
|
| 156 |
+
|
| 157 |
+
if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
|
| 158 |
+
raise ValueError(_WARNING)
|
| 159 |
+
|
| 160 |
+
if os.name == "nt":
|
| 161 |
+
raise NotImplementedError("This metric is currently not supported on Windows.")
|
| 162 |
+
|
| 163 |
+
with ThreadPoolExecutor(max_workers=num_workers) as executor:
|
| 164 |
+
futures = []
|
| 165 |
+
completion_id = Counter()
|
| 166 |
+
n_samples = 0
|
| 167 |
+
results = defaultdict(list)
|
| 168 |
+
|
| 169 |
+
for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
|
| 170 |
+
for candidate in candidates:
|
| 171 |
+
test_program = candidate + "\n" + test_case
|
| 172 |
+
args = (test_program, timeout, task_id, completion_id[task_id])
|
| 173 |
+
future = executor.submit(check_correctness, *args)
|
| 174 |
+
futures.append(future)
|
| 175 |
+
completion_id[task_id] += 1
|
| 176 |
+
n_samples += 1
|
| 177 |
+
|
| 178 |
+
for future in as_completed(futures):
|
| 179 |
+
result = future.result()
|
| 180 |
+
results[result["task_id"]].append((result["completion_id"], result))
|
| 181 |
+
|
| 182 |
+
total, correct = [], []
|
| 183 |
+
for result in results.values():
|
| 184 |
+
result.sort()
|
| 185 |
+
passed = [r[1]["passed"] for r in result]
|
| 186 |
+
total.append(len(passed))
|
| 187 |
+
correct.append(sum(passed))
|
| 188 |
+
total = np.array(total)
|
| 189 |
+
correct = np.array(correct)
|
| 190 |
+
|
| 191 |
+
ks = k
|
| 192 |
+
pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
|
| 193 |
+
|
| 194 |
+
return pass_at_k, results
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def estimate_pass_at_k(num_samples, num_correct, k):
|
| 198 |
+
"""Estimates pass@k of each problem and returns them in an array."""
|
| 199 |
+
|
| 200 |
+
def estimator(n: int, c: int, k: int) -> float:
|
| 201 |
+
"""Calculates 1 - comb(n - c, k) / comb(n, k)."""
|
| 202 |
+
if n - c < k:
|
| 203 |
+
return 1.0
|
| 204 |
+
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
|
| 205 |
+
|
| 206 |
+
if isinstance(num_samples, int):
|
| 207 |
+
num_samples_it = itertools.repeat(num_samples, len(num_correct))
|
| 208 |
+
else:
|
| 209 |
+
assert len(num_samples) == len(num_correct)
|
| 210 |
+
num_samples_it = iter(num_samples)
|
| 211 |
+
|
| 212 |
+
return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
|
testbed/huggingface__datasets/metrics/code_eval/execute.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
# This code is adapted from OpenAI's release
|
| 16 |
+
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
|
| 17 |
+
|
| 18 |
+
import contextlib
|
| 19 |
+
import faulthandler
|
| 20 |
+
import io
|
| 21 |
+
import multiprocessing
|
| 22 |
+
import os
|
| 23 |
+
import platform
|
| 24 |
+
import signal
|
| 25 |
+
import tempfile
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def check_correctness(check_program, timeout, task_id, completion_id):
    """Run ``check_program`` in a subprocess and report whether it passed.

    The program is executed by ``unsafe_execute`` in a separate process so a
    hang or hard crash cannot take down the caller.

    :param completion_id: an optional completion ID so we can match
        the results later even if execution finishes asynchronously.
    """
    # A manager-backed list lets the child process report its outcome back.
    manager = multiprocessing.Manager()
    outcome = manager.list()

    worker = multiprocessing.Process(target=unsafe_execute, args=(check_program, outcome, timeout))
    worker.start()
    # Give the child one grace second beyond the program's own time limit.
    worker.join(timeout=timeout + 1)
    if worker.is_alive():
        worker.kill()

    # No entry means the child never got to report: treat it as a timeout.
    if not outcome:
        outcome.append("timed out")

    status = outcome[0]
    return {
        "task_id": task_id,
        "passed": status == "passed",
        "result": status,
        "completion_id": completion_id,
    }
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def unsafe_execute(check_program, result, timeout):
    """Execute ``check_program`` inside a throwaway directory and report the outcome.

    Appends exactly one of ``"passed"``, ``"timed out"``, or ``"failed: <exc>"``
    to ``result``. Meant to run in a child process (see ``check_correctness``),
    because ``reliability_guard()`` deliberately cripples this interpreter.
    """
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        # Save private references *before* reliability_guard() replaces the
        # module-level attributes with None.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up: restore the functions that
        # reliability_guard() nulled so TemporaryDirectory can remove itself.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
@contextlib.contextmanager
def time_limit(seconds):
    """Context manager that raises ``TimeoutException`` after ``seconds``.

    Uses SIGALRM via ``signal.setitimer``, so it only works on the main
    thread of a Unix process.
    """

    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    # Install the handler *before* arming the timer: otherwise a very short
    # timeout could fire while the default SIGALRM action (process
    # termination) is still in effect.
    signal.signal(signal.SIGALRM, signal_handler)
    signal.setitimer(signal.ITIMER_REAL, seconds)
    try:
        yield
    finally:
        # Disarm the timer so a late alarm cannot fire after the block exits.
        signal.setitimer(signal.ITIMER_REAL, 0)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
@contextlib.contextmanager
def swallow_io():
    """Silence stdout/stderr and block stdin for the duration of the block."""
    sink = WriteOnlyStringIO()
    with contextlib.redirect_stdout(sink), contextlib.redirect_stderr(sink), redirect_stdin(sink):
        yield
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
@contextlib.contextmanager
def create_tempdir():
    """Create a throwaway directory, cd into it, and yield its path."""
    with tempfile.TemporaryDirectory() as tmp_path, chdir(tmp_path):
        yield tmp_path
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
class TimeoutException(Exception):
    """Raised when the executed program exceeds its time limit."""
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
class WriteOnlyStringIO(io.StringIO):
    """An in-memory text stream that can be written to but never read."""

    def _refuse(self, *args, **kwargs):
        # Every read-style method funnels through here.
        raise OSError

    read = _refuse
    readline = _refuse
    readlines = _refuse

    def readable(self, *args, **kwargs):
        """This stream is never readable."""
        return False
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    # Counterpart to contextlib.redirect_stdout/redirect_stderr for stdin:
    # _RedirectStream swaps the named sys attribute for the context's duration.
    _stream = "stdin"
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
@contextlib.contextmanager
def chdir(root):
    """Temporarily change the working directory to ``root``.

    ``"."`` is treated as a no-op so callers can pass it without a
    getcwd/chdir round trip. The previous working directory is always
    restored, even if the body raises.
    """
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    finally:
        # The original had `except BaseException as exc: raise exc`, a no-op
        # re-raise that only obscured tracebacks; `finally` alone suffices.
        os.chdir(cwd)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def reliability_guard(maximum_memory_bytes=None):
    """
    This disables various destructive functions and prevents the generated code
    from interfering with the test (e.g. fork bomb, killing other processes,
    removing filesystem files, etc.)

    WARNING
    This function is NOT a security sandbox. Untrusted code, including, model-
    generated code, should not be blindly executed outside of one. See the
    Codex paper for more information about OpenAI's code sandbox, and proceed
    with caution.
    """

    if maximum_memory_bytes is not None:
        import resource

        # Cap the address space and data segment of this process.
        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        # NOTE(review): RLIMIT_STACK is skipped on macOS — presumably because
        # setting it fails there; confirm before changing.
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    # Prevent the tested program from terminating the worker via exit()/quit().
    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    # Null out os-level functions that could kill processes, spawn children,
    # or mutate the filesystem outside the sandboxed tempdir.
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    # NOTE(review): relies on __builtins__ being a dict, which holds when this
    # file is imported as a module (not when run as __main__).
    __builtins__["help"] = None

    import sys

    # Setting a sys.modules entry to None makes `import <name>` raise
    # ImportError, blocking debuggers, process tools, and GUI toolkits.
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
|
testbed/huggingface__datasets/metrics/comet/README.md
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Metric Card for COMET
|
| 2 |
+
|
| 3 |
+
## Metric description
|
| 4 |
+
|
| 5 |
+
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments.
|
| 6 |
+
|
| 7 |
+
## How to use
|
| 8 |
+
|
| 9 |
+
COMET takes 3 lists of strings as input: `sources` (a list of source sentences), `predictions` (a list of candidate translations) and `references` (a list of reference translations).
|
| 10 |
+
|
| 11 |
+
```python
|
| 12 |
+
from datasets import load_metric
|
| 13 |
+
comet_metric = load_metric('comet')
|
| 14 |
+
source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
|
| 15 |
+
hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
|
| 16 |
+
reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
|
| 17 |
+
comet_score = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
|
| 18 |
+
```
|
| 19 |
+
|
| 20 |
+
It has several configurations, named after the COMET model to be used. It will default to `wmt20-comet-da` (previously known as `wmt-large-da-estimator-1719`). Alternate models that can be chosen include `wmt20-comet-qe-da`, `wmt21-comet-mqm`, `wmt21-cometinho-da`, `wmt21-comet-qe-mqm` and `emnlp20-comet-rank`.
|
| 21 |
+
|
| 22 |
+
It also has several optional arguments:
|
| 23 |
+
|
| 24 |
+
`gpus`: optional, an integer (number of GPUs to train on) or a list of integers (which GPUs to train on). Set to 0 to use CPU. The default value is `None` (uses one GPU if possible, else use CPU).
|
| 25 |
+
|
| 26 |
+
`progress_bar`: a boolean -- if set to `True`, progress updates will be printed out. The default value is `False`.
|
| 27 |
+
|
| 28 |
+
More information about model characteristics can be found on the [COMET website](https://unbabel.github.io/COMET/html/models.html).
|
| 29 |
+
|
| 30 |
+
## Output values
|
| 31 |
+
|
| 32 |
+
The COMET metric outputs two lists:
|
| 33 |
+
|
| 34 |
+
`scores`: a list of COMET scores for each of the input sentences, ranging from 0-1.
|
| 35 |
+
|
| 36 |
+
`mean_score`: the mean value of COMET scores `scores` over all the input sentences, ranging from 0-1.
|
| 37 |
+
|
| 38 |
+
### Values from popular papers
|
| 39 |
+
|
| 40 |
+
The [original COMET paper](https://arxiv.org/pdf/2009.09025.pdf) reported average COMET scores ranging from 0.4 to 0.6, depending on the language pairs used for evaluating translation models. They also illustrate that COMET correlates well with human judgements compared to other metrics such as [BLEU](https://huggingface.co/metrics/bleu) and [CHRF](https://huggingface.co/metrics/chrf).
|
| 41 |
+
|
| 42 |
+
## Examples
|
| 43 |
+
|
| 44 |
+
Full match:
|
| 45 |
+
|
| 46 |
+
```python
|
| 47 |
+
from datasets import load_metric
|
| 48 |
+
comet_metric = load_metric('comet')
|
| 49 |
+
source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
|
| 50 |
+
hypothesis = ["They were able to control the fire.", "Schools and kindergartens opened"]
|
| 51 |
+
reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
|
| 52 |
+
results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
|
| 53 |
+
print([round(v, 1) for v in results["scores"]])
|
| 54 |
+
[1.0, 1.0]
|
| 55 |
+
```
|
| 56 |
+
|
| 57 |
+
Partial match:
|
| 58 |
+
|
| 59 |
+
```python
|
| 60 |
+
from datasets import load_metric
|
| 61 |
+
comet_metric = load_metric('comet')
|
| 62 |
+
source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
|
| 63 |
+
hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
|
| 64 |
+
reference = ["They were able to control the fire", "Schools and kindergartens opened"]
|
| 65 |
+
results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
|
| 66 |
+
print([round(v, 2) for v in results["scores"]])
|
| 67 |
+
[0.19, 0.92]
|
| 68 |
+
```
|
| 69 |
+
|
| 70 |
+
No match:
|
| 71 |
+
|
| 72 |
+
```python
|
| 73 |
+
from datasets import load_metric
|
| 74 |
+
comet_metric = load_metric('comet')
|
| 75 |
+
source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
|
| 76 |
+
hypothesis = ["The girl went for a walk", "The boy was sleeping"]
|
| 77 |
+
reference = ["They were able to control the fire", "Schools and kindergartens opened"]
|
| 78 |
+
results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
|
| 79 |
+
print([round(v, 2) for v in results["scores"]])
|
| 80 |
+
[0.00, 0.00]
|
| 81 |
+
```
|
| 82 |
+
|
| 83 |
+
## Limitations and bias
|
| 84 |
+
|
| 85 |
+
The models provided for calculating the COMET metric are built on top of XLM-R and cover the following languages:
|
| 86 |
+
|
| 87 |
+
Afrikaans, Albanian, Amharic, Arabic, Armenian, Assamese, Azerbaijani, Basque, Belarusian, Bengali, Bengali Romanized, Bosnian, Breton, Bulgarian, Burmese, Catalan, Chinese (Simplified), Chinese (Traditional), Croatian, Czech, Danish, Dutch, English, Esperanto, Estonian, Filipino, Finnish, French, Galician, Georgian, German, Greek, Gujarati, Hausa, Hebrew, Hindi, Hindi Romanized, Hungarian, Icelandic, Indonesian, Irish, Italian, Japanese, Javanese, Kannada, Kazakh, Khmer, Korean, Kurdish (Kurmanji), Kyrgyz, Lao, Latin, Latvian, Lithuanian, Macedonian, Malagasy, Malay, Malayalam, Marathi, Mongolian, Nepali, Norwegian, Oriya, Oromo, Pashto, Persian, Polish, Portuguese, Punjabi, Romanian, Russian, Sanskrit, Scottish Gaelic, Serbian, Sindhi, Sinhala, Slovak, Slovenian, Somali, Spanish, Sundanese, Swahili, Swedish, Tamil, Tamil Romanized, Telugu, Telugu Romanized, Thai, Turkish, Ukrainian, Urdu, Urdu Romanized, Uyghur, Uzbek, Vietnamese, Welsh, Western Frisian, Xhosa, Yiddish.
|
| 88 |
+
|
| 89 |
+
Thus, results for language pairs containing uncovered languages are unreliable, as per the [COMET website](https://github.com/Unbabel/COMET)
|
| 90 |
+
|
| 91 |
+
Also, calculating the COMET metric involves downloading the model from which features are obtained -- the default model, `wmt20-comet-da`, takes over 1.79GB of storage space and downloading it can take a significant amount of time depending on the speed of your internet connection. If this is an issue, choose a smaller model; for instance `wmt21-cometinho-da` is 344MB.
|
| 92 |
+
|
| 93 |
+
## Citation
|
| 94 |
+
|
| 95 |
+
```bibtex
|
| 96 |
+
@inproceedings{rei-EtAl:2020:WMT,
|
| 97 |
+
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
|
| 98 |
+
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
|
| 99 |
+
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
|
| 100 |
+
month = {November},
|
| 101 |
+
year = {2020},
|
| 102 |
+
address = {Online},
|
| 103 |
+
publisher = {Association for Computational Linguistics},
|
| 104 |
+
pages = {909--918},
|
| 105 |
+
}
|
| 106 |
+
```
|
| 107 |
+
|
| 108 |
+
```bibtex
|
| 109 |
+
@inproceedings{rei-etal-2020-comet,
|
| 110 |
+
title = "{COMET}: A Neural Framework for {MT} Evaluation",
|
| 111 |
+
author = "Rei, Ricardo and
|
| 112 |
+
Stewart, Craig and
|
| 113 |
+
Farinha, Ana C and
|
| 114 |
+
Lavie, Alon",
|
| 115 |
+
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
|
| 116 |
+
month = nov,
|
| 117 |
+
year = "2020",
|
| 118 |
+
address = "Online",
|
| 119 |
+
publisher = "Association for Computational Linguistics",
|
| 120 |
+
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
|
| 121 |
+
    pages = "2685--2702",
}
|
| 122 |
+
|
| 123 |
+
```
|
| 124 |
+
|
| 125 |
+
## Further References
|
| 126 |
+
|
| 127 |
+
- [COMET website](https://unbabel.github.io/COMET/html/index.html)
|
| 128 |
+
- [Hugging Face Tasks - Machine Translation](https://huggingface.co/tasks/translation)
|
testbed/huggingface__datasets/metrics/comet/comet.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Datasets Authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
""" COMET metric.
|
| 15 |
+
|
| 16 |
+
Requirements:
|
| 17 |
+
pip install unbabel-comet
|
| 18 |
+
|
| 19 |
+
Usage:
|
| 20 |
+
|
| 21 |
+
```python
|
| 22 |
+
from datasets import load_metric
|
| 23 |
+
comet_metric = load_metric('metrics/comet/comet.py')
|
| 24 |
+
#comet_metric = load_metric('comet')
|
| 25 |
+
#comet_metric = load_metric('comet', 'wmt-large-hter-estimator')
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
|
| 29 |
+
hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
|
| 30 |
+
reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
|
| 31 |
+
|
| 32 |
+
predictions = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
|
| 33 |
+
predictions['scores']
|
| 34 |
+
```
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
import comet # From: unbabel-comet
|
| 38 |
+
import torch
|
| 39 |
+
|
| 40 |
+
import datasets
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
logger = datasets.logging.get_logger(__name__)
|
| 44 |
+
|
| 45 |
+
_CITATION = """\
|
| 46 |
+
@inproceedings{rei-EtAl:2020:WMT,
|
| 47 |
+
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
|
| 48 |
+
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
|
| 49 |
+
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
|
| 50 |
+
month = {November},
|
| 51 |
+
year = {2020},
|
| 52 |
+
address = {Online},
|
| 53 |
+
publisher = {Association for Computational Linguistics},
|
| 54 |
+
pages = {909--918},
|
| 55 |
+
}
|
| 56 |
+
@inproceedings{rei-etal-2020-comet,
|
| 57 |
+
title = "{COMET}: A Neural Framework for {MT} Evaluation",
|
| 58 |
+
author = "Rei, Ricardo and
|
| 59 |
+
Stewart, Craig and
|
| 60 |
+
Farinha, Ana C and
|
| 61 |
+
Lavie, Alon",
|
| 62 |
+
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
|
| 63 |
+
month = nov,
|
| 64 |
+
year = "2020",
|
| 65 |
+
address = "Online",
|
| 66 |
+
publisher = "Association for Computational Linguistics",
|
| 67 |
+
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
|
| 68 |
+
pages = "2685--2702",
|
| 69 |
+
}
|
| 70 |
+
"""
|
| 71 |
+
|
| 72 |
+
_DESCRIPTION = """\
|
| 73 |
+
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
|
| 74 |
+
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
|
| 75 |
+
|
| 76 |
+
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
|
| 77 |
+
"""
|
| 78 |
+
|
| 79 |
+
_KWARGS_DESCRIPTION = """
|
| 80 |
+
COMET score.
|
| 81 |
+
|
| 82 |
+
Args:
|
| 83 |
+
|
| 84 |
+
`sources` (list of str): Source sentences
|
| 85 |
+
`predictions` (list of str): candidate translations
|
| 86 |
+
`references` (list of str): reference translations
|
| 87 |
+
`cuda` (bool): If set to True, runs COMET using GPU
|
| 88 |
+
`show_progress` (bool): Shows progress
|
| 89 |
+
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
|
| 90 |
+
|
| 91 |
+
Returns:
|
| 92 |
+
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
|
| 93 |
+
`scores`: List of scores.
|
| 94 |
+
|
| 95 |
+
Examples:
|
| 96 |
+
|
| 97 |
+
>>> comet_metric = datasets.load_metric('comet')
|
| 98 |
+
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
|
| 99 |
+
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
|
| 100 |
+
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
|
| 101 |
+
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
|
| 102 |
+
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
|
| 103 |
+
>>> print([round(v, 2) for v in results["scores"]])
|
| 104 |
+
[0.19, 0.92]
|
| 105 |
+
"""
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    """COMET machine-translation metric backed by the unbabel-comet package."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # All three inputs are plain string sequences.
                {name: datasets.Value("string", id="sequence") for name in ("sources", "predictions", "references")}
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        # "default" maps to the wmt20-comet-da checkpoint; any other config
        # name is forwarded verbatim to comet.download_model.
        model_name = "wmt20-comet-da" if self.config_name == "default" else self.config_name
        self.scorer = comet.load_from_checkpoint(comet.download_model(model_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            # Default to one GPU when available, otherwise CPU.
            gpus = 1 if torch.cuda.is_available() else 0
        # COMET expects a list of {"src", "mt", "ref"} dicts, one per sample.
        samples = [
            {"src": src, "mt": mt, "ref": ref}
            for src, mt, ref in zip(sources, predictions, references)
        ]
        scores, mean_score = self.scorer.predict(samples, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
|
testbed/huggingface__datasets/metrics/competition_math/README.md
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Metric Card for Competition MATH
|
| 2 |
+
|
| 3 |
+
## Metric description
|
| 4 |
+
|
| 5 |
+
This metric is used to assess performance on the [Mathematics Aptitude Test of Heuristics (MATH) dataset](https://huggingface.co/datasets/competition_math).
|
| 6 |
+
|
| 7 |
+
It first canonicalizes the inputs (e.g., converting `1/2` to `\\frac{1}{2}`) and then computes accuracy.
|
| 8 |
+
|
| 9 |
+
## How to use
|
| 10 |
+
|
| 11 |
+
This metric takes two arguments:
|
| 12 |
+
|
| 13 |
+
`predictions`: a list of predictions to score. Each prediction is a string that contains natural language and LaTeX.
|
| 14 |
+
|
| 15 |
+
`references`: list of reference for each prediction. Each reference is a string that contains natural language and LaTeX.
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
```python
|
| 19 |
+
>>> from datasets import load_metric
|
| 20 |
+
>>> math = load_metric("competition_math")
|
| 21 |
+
>>> references = ["\\frac{1}{2}"]
|
| 22 |
+
>>> predictions = ["1/2"]
|
| 23 |
+
>>> results = math.compute(references=references, predictions=predictions)
|
| 24 |
+
```
|
| 25 |
+
|
| 26 |
+
N.B. To be able to use Competition MATH, you need to install the `math_equivalence` dependency using `pip install git+https://github.com/hendrycks/math.git`.
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
## Output values
|
| 30 |
+
|
| 31 |
+
This metric returns a dictionary that contains the [accuracy](https://huggingface.co/metrics/accuracy) after canonicalizing inputs, on a scale between 0.0 and 1.0.
|
| 32 |
+
|
| 33 |
+
### Values from popular papers
|
| 34 |
+
The [original MATH dataset paper](https://arxiv.org/abs/2103.03874) reported accuracies ranging from 3.0% to 6.9% by different large language models.
|
| 35 |
+
|
| 36 |
+
More recent progress on the dataset can be found on the [dataset leaderboard](https://paperswithcode.com/sota/math-word-problem-solving-on-math).
|
| 37 |
+
|
| 38 |
+
## Examples
|
| 39 |
+
|
| 40 |
+
Maximal values (full match):
|
| 41 |
+
|
| 42 |
+
```python
|
| 43 |
+
>>> from datasets import load_metric
|
| 44 |
+
>>> math = load_metric("competition_math")
|
| 45 |
+
>>> references = ["\\frac{1}{2}"]
|
| 46 |
+
>>> predictions = ["1/2"]
|
| 47 |
+
>>> results = math.compute(references=references, predictions=predictions)
|
| 48 |
+
>>> print(results)
|
| 49 |
+
{'accuracy': 1.0}
|
| 50 |
+
```
|
| 51 |
+
|
| 52 |
+
Minimal values (no match):
|
| 53 |
+
|
| 54 |
+
```python
|
| 55 |
+
>>> from datasets import load_metric
|
| 56 |
+
>>> math = load_metric("competition_math")
|
| 57 |
+
>>> references = ["\\frac{1}{2}"]
|
| 58 |
+
>>> predictions = ["3/4"]
|
| 59 |
+
>>> results = math.compute(references=references, predictions=predictions)
|
| 60 |
+
>>> print(results)
|
| 61 |
+
{'accuracy': 0.0}
|
| 62 |
+
```
|
| 63 |
+
|
| 64 |
+
Partial match:
|
| 65 |
+
|
| 66 |
+
```python
|
| 67 |
+
>>> from datasets import load_metric
|
| 68 |
+
>>> math = load_metric("competition_math")
|
| 69 |
+
>>> references = ["\\frac{1}{2}","\\frac{3}{4}"]
|
| 70 |
+
>>> predictions = ["1/5", "3/4"]
|
| 71 |
+
>>> results = math.compute(references=references, predictions=predictions)
|
| 72 |
+
>>> print(results)
|
| 73 |
+
{'accuracy': 0.5}
|
| 74 |
+
```
|
| 75 |
+
|
| 76 |
+
## Limitations and bias
|
| 77 |
+
|
| 78 |
+
This metric is limited to datasets with the same format as the [Mathematics Aptitude Test of Heuristics (MATH) dataset](https://huggingface.co/datasets/competition_math), and is meant to evaluate the performance of large language models at solving mathematical problems.
|
| 79 |
+
|
| 80 |
+
N.B. The MATH dataset also assigns levels of difficulty to different problems, so disaggregating model performance by difficulty level (similarly to what was done in the [original paper](https://arxiv.org/abs/2103.03874)) can give a better indication of how a given model does on a given difficulty of math problem, compared to overall accuracy.
|
| 81 |
+
|
| 82 |
+
## Citation
|
| 83 |
+
|
| 84 |
+
```bibtex
|
| 85 |
+
@article{hendrycksmath2021,
|
| 86 |
+
title={Measuring Mathematical Problem Solving With the MATH Dataset},
|
| 87 |
+
author={Dan Hendrycks
|
| 88 |
+
and Collin Burns
|
| 89 |
+
and Saurav Kadavath
|
| 90 |
+
and Akul Arora
|
| 91 |
+
and Steven Basart
|
| 92 |
+
and Eric Tang
|
| 93 |
+
and Dawn Song
|
| 94 |
+
and Jacob Steinhardt},
|
| 95 |
+
journal={arXiv preprint arXiv:2103.03874},
|
| 96 |
+
year={2021}
|
| 97 |
+
}
|
| 98 |
+
```
|
| 99 |
+
|
| 100 |
+
## Further References
|
| 101 |
+
- [MATH dataset](https://huggingface.co/datasets/competition_math)
|
| 102 |
+
- [MATH leaderboard](https://paperswithcode.com/sota/math-word-problem-solving-on-math)
|
| 103 |
+
- [MATH paper](https://arxiv.org/abs/2103.03874)
|
testbed/huggingface__datasets/metrics/competition_math/competition_math.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Accuracy metric for the Mathematics Aptitude Test of Heuristics (MATH) dataset."""
|
| 15 |
+
|
| 16 |
+
import math_equivalence # From: git+https://github.com/hendrycks/math.git
|
| 17 |
+
|
| 18 |
+
import datasets
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
_CITATION = """\
|
| 22 |
+
@article{hendrycksmath2021,
|
| 23 |
+
title={Measuring Mathematical Problem Solving With the MATH Dataset},
|
| 24 |
+
author={Dan Hendrycks
|
| 25 |
+
and Collin Burns
|
| 26 |
+
and Saurav Kadavath
|
| 27 |
+
and Akul Arora
|
| 28 |
+
and Steven Basart
|
| 29 |
+
and Eric Tang
|
| 30 |
+
and Dawn Song
|
| 31 |
+
and Jacob Steinhardt},
|
| 32 |
+
journal={arXiv preprint arXiv:2103.03874},
|
| 33 |
+
year={2021}
|
| 34 |
+
}
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
_DESCRIPTION = """\
|
| 39 |
+
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
|
| 40 |
+
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
|
| 41 |
+
"""
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
_KWARGS_DESCRIPTION = r"""
|
| 45 |
+
Calculates accuracy after canonicalizing inputs.
|
| 46 |
+
|
| 47 |
+
Args:
|
| 48 |
+
predictions: list of predictions to score. Each prediction
|
| 49 |
+
is a string that contains natural language and LaTex.
|
| 50 |
+
references: list of reference for each prediction. Each
|
| 51 |
+
reference is a string that contains natural language
|
| 52 |
+
and LaTex.
|
| 53 |
+
Returns:
|
| 54 |
+
accuracy: accuracy after canonicalizing inputs
|
| 55 |
+
(e.g., converting "1/2" to "\\frac{1}{2}")
|
| 56 |
+
|
| 57 |
+
Examples:
|
| 58 |
+
>>> metric = datasets.load_metric("competition_math")
|
| 59 |
+
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
|
| 60 |
+
>>> print(results)
|
| 61 |
+
{'accuracy': 1.0}
|
| 62 |
+
"""
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset.

    Each prediction/reference pair is compared with ``math_equivalence.is_equiv``,
    which canonicalizes LaTeX before testing equality (e.g. "1/2" vs "\\frac{1}{2}").
    """

    def _info(self):
        """Return the metric metadata (description, citation, expected string features)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            # Homepage of the metric for documentation
            homepage="https://github.com/hendrycks/math",
            # Additional links to the codebase or references
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Return {'accuracy': fraction of predictions judged equivalent to their reference}."""
        n_correct = sum(
            1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
            for pred, ref in zip(predictions, references)
        )
        return {"accuracy": n_correct / len(predictions)}
|
testbed/huggingface__datasets/metrics/coval/README.md
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Metric Card for COVAL
|
| 2 |
+
|
| 3 |
+
## Metric description
|
| 4 |
+
|
| 5 |
+
CoVal is a coreference evaluation tool for the [CoNLL](https://huggingface.co/datasets/conll2003) and [ARRAU](https://catalog.ldc.upenn.edu/LDC2013T22) datasets which implements the common evaluation metrics including MUC [Vilain et al, 1995](https://aclanthology.org/M95-1005.pdf), B-cubed [Bagga and Baldwin, 1998](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.34.2578&rep=rep1&type=pdf), CEAFe [Luo et al., 2005](https://aclanthology.org/H05-1004.pdf), LEA [Moosavi and Strube, 2016](https://aclanthology.org/P16-1060.pdf) and the averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe).
|
| 6 |
+
|
| 7 |
+
CoVal code was written by [`@ns-moosavi`](https://github.com/ns-moosavi), with some parts borrowed from [Deep Coref](https://github.com/clarkkev/deep-coref/blob/master/evaluation.py). The test suite is taken from the [official CoNLL code](https://github.com/conll/reference-coreference-scorers/), with additions by [`@andreasvc`](https://github.com/andreasvc) and file parsing developed by Leo Born.
|
| 8 |
+
|
| 9 |
+
## How to use
|
| 10 |
+
|
| 11 |
+
The metric takes two lists of sentences as input: one representing `predictions` and `references`, with the sentences consisting of words in the CoNLL format (see the [Limitations and bias](#Limitations-and-bias) section below for more details on the CoNLL format).
|
| 12 |
+
|
| 13 |
+
```python
|
| 14 |
+
from datasets import load_metric
|
| 15 |
+
coval = load_metric('coval')
|
| 16 |
+
words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
|
| 17 |
+
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
|
| 18 |
+
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
|
| 19 |
+
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
|
| 20 |
+
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
|
| 21 |
+
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
|
| 22 |
+
references = [words]
|
| 23 |
+
predictions = [words]
|
| 24 |
+
results = coval.compute(predictions=predictions, references=references)
|
| 25 |
+
```
|
| 26 |
+
It also has several optional arguments:
|
| 27 |
+
|
| 28 |
+
`keep_singletons`: After extracting all mentions from the key or system files, mentions whose corresponding coreference chain is of size one are considered as singletons. The default evaluation mode will include singletons in evaluations if they are included in the key or the system files. By setting `keep_singletons=False`, all singletons in the key and system files will be excluded from the evaluation.
|
| 29 |
+
|
| 30 |
+
`NP_only`: Most of the recent coreference resolvers only resolve NP mentions and leave out the resolution of VPs. By setting the `NP_only` option, the scorer will only evaluate the resolution of NPs.
|
| 31 |
+
|
| 32 |
+
`min_span`: By setting `min_span`, the scorer reports the results based on automatically detected minimum spans. Minimum spans are determined using the [MINA algorithm](https://arxiv.org/pdf/1906.06703.pdf).
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
## Output values
|
| 36 |
+
|
| 37 |
+
The metric outputs a dictionary with the following key-value pairs:
|
| 38 |
+
|
| 39 |
+
`mentions`: number of mentions, ranges from 0-1
|
| 40 |
+
|
| 41 |
+
`muc`: MUC metric, which expresses performance in terms of recall and precision, ranging from 0-1.
|
| 42 |
+
|
| 43 |
+
`bcub`: B-cubed metric, which is the averaged precision of all items in the distribution, ranging from 0-1.
|
| 44 |
+
|
| 45 |
+
`ceafe`: CEAFe (Constrained Entity Alignment F-Measure) is computed by aligning reference and system entities with the constraint that a reference entity is aligned with at most one system entity. It ranges from 0-1.
|
| 46 |
+
|
| 47 |
+
`lea`: LEA is a Link-Based Entity-Aware metric which, for each entity, considers how important the entity is and how well it is resolved. It ranges from 0-1.
|
| 48 |
+
|
| 49 |
+
`conll_score`: averaged CoNLL score (the average of the F1 values of `muc`, `bcub` and `ceafe`), ranging from 0 to 100.
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
### Values from popular papers
|
| 53 |
+
|
| 54 |
+
Given that many of the metrics returned by COVAL come from different sources, it is hard to cite reference values for all of them.
|
| 55 |
+
|
| 56 |
+
The CoNLL score is used to track progress on different datasets such as the [ARRAU corpus](https://paperswithcode.com/sota/coreference-resolution-on-the-arrau-corpus) and [CoNLL 2012](https://paperswithcode.com/sota/coreference-resolution-on-conll-2012).
|
| 57 |
+
|
| 58 |
+
## Examples
|
| 59 |
+
|
| 60 |
+
Maximal values
|
| 61 |
+
|
| 62 |
+
```python
|
| 63 |
+
from datasets import load_metric
|
| 64 |
+
coval = load_metric('coval')
|
| 65 |
+
words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
|
| 66 |
+
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
|
| 67 |
+
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
|
| 68 |
+
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
|
| 69 |
+
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
|
| 70 |
+
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
|
| 71 |
+
references = [words]
|
| 72 |
+
predictions = [words]
|
| 73 |
+
results = coval.compute(predictions=predictions, references=references)
|
| 74 |
+
print(results)
|
| 75 |
+
{'mentions/recall': 1.0, 'mentions/precision': 1.0, 'mentions/f1': 1.0, 'muc/recall': 1.0, 'muc/precision': 1.0, 'muc/f1': 1.0, 'bcub/recall': 1.0, 'bcub/precision': 1.0, 'bcub/f1': 1.0, 'ceafe/recall': 1.0, 'ceafe/precision': 1.0, 'ceafe/f1': 1.0, 'lea/recall': 1.0, 'lea/precision': 1.0, 'lea/f1': 1.0, 'conll_score': 100.0}
|
| 76 |
+
```
|
| 77 |
+
|
| 78 |
+
## Limitations and bias
|
| 79 |
+
|
| 80 |
+
This wrapper of CoVal currently only works with [CoNLL line format](https://huggingface.co/datasets/conll2003), which has one word per line with all the annotation for this word in column separated by spaces:
|
| 81 |
+
|
| 82 |
+
| Column | Type | Description |
|
| 83 |
+
|:-------|:----------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
| 84 |
+
| 1 | Document ID | This is a variation on the document filename |
|
| 85 |
+
| 2 | Part number | Some files are divided into multiple parts numbered as 000, 001, 002, ... etc. |
|
| 86 |
+
| 3 | Word number | |
|
| 87 |
+
| 4 | Word | This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release. |
|
| 88 |
+
| 5 | Part-of-Speech | |
|
| 89 |
+
| 6 | Parse bit | This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column. |
|
| 90 |
+
| 7 | Predicate lemma | The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-". |
|
| 91 |
+
| 8 | Predicate Frameset ID | This is the PropBank frameset ID of the predicate in Column 7. |
|
| 92 |
+
| 9 | Word sense | This is the word sense of the word in Column 3. |
|
| 93 |
+
| 10 | Speaker/Author | This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data. |
|
| 94 |
+
| 11 | Named Entities | These columns identifies the spans representing various named entities. |
|
| 95 |
+
| 12:N | Predicate Arguments | There is one column each of predicate argument structure information for the predicate mentioned in Column 7. |
|
| 96 |
+
| N | Coreference | Coreference chain information encoded in a parenthesis structure. |
|
| 97 |
+
|
| 98 |
+
## Citations
|
| 99 |
+
|
| 100 |
+
```bibtex
|
| 101 |
+
@InProceedings{moosavi2019minimum,
|
| 102 |
+
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
|
| 103 |
+
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
|
| 104 |
+
year = {2019},
|
| 105 |
+
booktitle = {Proceedings of the 57th Annual Meeting of
|
| 106 |
+
the Association for Computational Linguistics (Volume 1: Long Papers)},
|
| 107 |
+
publisher = {Association for Computational Linguistics},
|
| 108 |
+
address = {Florence, Italy},
|
| 109 |
+
}
|
| 110 |
+
```
|
| 111 |
+
```bibtex
|
| 112 |
+
@inproceedings{10.3115/1072399.1072405,
|
| 113 |
+
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
|
| 114 |
+
title = {A Model-Theoretic Coreference Scoring Scheme},
|
| 115 |
+
year = {1995},
|
| 116 |
+
isbn = {1558604022},
|
| 117 |
+
publisher = {Association for Computational Linguistics},
|
| 118 |
+
address = {USA},
|
| 119 |
+
url = {https://doi.org/10.3115/1072399.1072405},
|
| 120 |
+
doi = {10.3115/1072399.1072405},
|
| 121 |
+
booktitle = {Proceedings of the 6th Conference on Message Understanding},
|
| 122 |
+
pages = {45–52},
|
| 123 |
+
numpages = {8},
|
| 124 |
+
location = {Columbia, Maryland},
|
| 125 |
+
series = {MUC6 ’95}
|
| 126 |
+
}
|
| 127 |
+
```
|
| 128 |
+
|
| 129 |
+
```bibtex
|
| 130 |
+
@INPROCEEDINGS{Bagga98algorithmsfor,
|
| 131 |
+
author = {Amit Bagga and Breck Baldwin},
|
| 132 |
+
title = {Algorithms for Scoring Coreference Chains},
|
| 133 |
+
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
|
| 134 |
+
year = {1998},
|
| 135 |
+
pages = {563--566}
|
| 136 |
+
}
|
| 137 |
+
```
|
| 138 |
+
```bibtex
|
| 139 |
+
@INPROCEEDINGS{Luo05oncoreference,
|
| 140 |
+
author = {Xiaoqiang Luo},
|
| 141 |
+
title = {On coreference resolution performance metrics},
|
| 142 |
+
booktitle = {In Proc. of HLT/EMNLP},
|
| 143 |
+
year = {2005},
|
| 144 |
+
pages = {25--32},
|
| 145 |
+
publisher = {URL}
|
| 146 |
+
}
|
| 147 |
+
```
|
| 148 |
+
|
| 149 |
+
```bibtex
|
| 150 |
+
@inproceedings{moosavi-strube-2016-coreference,
|
| 151 |
+
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
|
| 152 |
+
author = "Moosavi, Nafise Sadat and
|
| 153 |
+
Strube, Michael",
|
| 154 |
+
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
|
| 155 |
+
month = aug,
|
| 156 |
+
year = "2016",
|
| 157 |
+
address = "Berlin, Germany",
|
| 158 |
+
publisher = "Association for Computational Linguistics",
|
| 159 |
+
url = "https://www.aclweb.org/anthology/P16-1060",
|
| 160 |
+
doi = "10.18653/v1/P16-1060",
|
| 161 |
+
pages = "632--642",
|
| 162 |
+
}
|
| 163 |
+
```
|
| 164 |
+
|
| 165 |
+
## Further References
|
| 166 |
+
|
| 167 |
+
- [CoNLL 2012 Task Description](http://www.conll.cemantix.org/2012/data.html): for information on the format (section "*_conll File Format")
|
| 168 |
+
- [CoNLL Evaluation details](https://github.com/ns-moosavi/coval/blob/master/conll/README.md)
|
| 169 |
+
- [Hugging Face - Neural Coreference Resolution (Neuralcoref)](https://huggingface.co/coref/)
|
| 170 |
+
|
testbed/huggingface__datasets/metrics/coval/coval.py
ADDED
|
@@ -0,0 +1,319 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Datasets Authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
""" CoVal metric. """
|
| 15 |
+
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
|
| 16 |
+
from coval.conll import reader, util
|
| 17 |
+
from coval.eval import evaluator
|
| 18 |
+
|
| 19 |
+
import datasets
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
logger = datasets.logging.get_logger(__name__)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
_CITATION = """\
|
| 26 |
+
@InProceedings{moosavi2019minimum,
|
| 27 |
+
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
|
| 28 |
+
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
|
| 29 |
+
year = {2019},
|
| 30 |
+
booktitle = {Proceedings of the 57th Annual Meeting of
|
| 31 |
+
the Association for Computational Linguistics (Volume 1: Long Papers)},
|
| 32 |
+
publisher = {Association for Computational Linguistics},
|
| 33 |
+
address = {Florence, Italy},
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
@inproceedings{10.3115/1072399.1072405,
|
| 37 |
+
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
|
| 38 |
+
title = {A Model-Theoretic Coreference Scoring Scheme},
|
| 39 |
+
year = {1995},
|
| 40 |
+
isbn = {1558604022},
|
| 41 |
+
publisher = {Association for Computational Linguistics},
|
| 42 |
+
address = {USA},
|
| 43 |
+
url = {https://doi.org/10.3115/1072399.1072405},
|
| 44 |
+
doi = {10.3115/1072399.1072405},
|
| 45 |
+
booktitle = {Proceedings of the 6th Conference on Message Understanding},
|
| 46 |
+
pages = {45–52},
|
| 47 |
+
numpages = {8},
|
| 48 |
+
location = {Columbia, Maryland},
|
| 49 |
+
series = {MUC6 ’95}
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
@INPROCEEDINGS{Bagga98algorithmsfor,
|
| 53 |
+
author = {Amit Bagga and Breck Baldwin},
|
| 54 |
+
title = {Algorithms for Scoring Coreference Chains},
|
| 55 |
+
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
|
| 56 |
+
year = {1998},
|
| 57 |
+
pages = {563--566}
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
@INPROCEEDINGS{Luo05oncoreference,
|
| 61 |
+
author = {Xiaoqiang Luo},
|
| 62 |
+
title = {On coreference resolution performance metrics},
|
| 63 |
+
booktitle = {In Proc. of HLT/EMNLP},
|
| 64 |
+
year = {2005},
|
| 65 |
+
pages = {25--32},
|
| 66 |
+
publisher = {URL}
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
@inproceedings{moosavi-strube-2016-coreference,
|
| 70 |
+
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
|
| 71 |
+
author = "Moosavi, Nafise Sadat and
|
| 72 |
+
Strube, Michael",
|
| 73 |
+
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
|
| 74 |
+
month = aug,
|
| 75 |
+
year = "2016",
|
| 76 |
+
address = "Berlin, Germany",
|
| 77 |
+
publisher = "Association for Computational Linguistics",
|
| 78 |
+
url = "https://www.aclweb.org/anthology/P16-1060",
|
| 79 |
+
doi = "10.18653/v1/P16-1060",
|
| 80 |
+
pages = "632--642",
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
"""
|
| 84 |
+
|
| 85 |
+
_DESCRIPTION = """\
|
| 86 |
+
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
|
| 87 |
+
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
|
| 88 |
+
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
|
| 89 |
+
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
|
| 90 |
+
(the average of the F1 values of MUC, B-cubed and CEAFe)
|
| 91 |
+
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
|
| 92 |
+
|
| 93 |
+
This wrapper of CoVal currently only work with CoNLL line format:
|
| 94 |
+
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
|
| 95 |
+
Column Type Description
|
| 96 |
+
1 Document ID This is a variation on the document filename
|
| 97 |
+
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
|
| 98 |
+
3 Word number
|
| 99 |
+
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
|
| 100 |
+
5 Part-of-Speech
|
| 101 |
+
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
|
| 102 |
+
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
|
| 103 |
+
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
|
| 104 |
+
9 Word sense This is the word sense of the word in Column 3.
|
| 105 |
+
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
|
| 106 |
+
11 Named Entities These columns identifies the spans representing various named entities.
|
| 107 |
+
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
|
| 108 |
+
N Coreference Coreference chain information encoded in a parenthesis structure.
|
| 109 |
+
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
|
| 110 |
+
|
| 111 |
+
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
|
| 112 |
+
|
| 113 |
+
CoVal code was written by @ns-moosavi.
|
| 114 |
+
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
|
| 115 |
+
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
|
| 116 |
+
Mention evaluation and the test suite are added by @andreasvc.
|
| 117 |
+
Parsing CoNLL files is developed by Leo Born.
|
| 118 |
+
"""
|
| 119 |
+
|
| 120 |
+
_KWARGS_DESCRIPTION = """
|
| 121 |
+
Calculates coreference evaluation metrics.
|
| 122 |
+
Args:
|
| 123 |
+
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
|
| 124 |
+
Each prediction is a word with its annotations as a string made of columns joined with spaces.
|
| 125 |
+
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
|
| 126 |
+
See the details on the format in the description of the metric.
|
| 127 |
+
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
|
| 128 |
+
Each reference is a word with its annotations as a string made of columns joined with spaces.
|
| 129 |
+
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
|
| 130 |
+
See the details on the format in the description of the metric.
|
| 131 |
+
keep_singletons: After extracting all mentions of key or system files,
|
| 132 |
+
mentions whose corresponding coreference chain is of size one,
|
| 133 |
+
are considered as singletons. The default evaluation mode will include
|
| 134 |
+
singletons in evaluations if they are included in the key or the system files.
|
| 135 |
+
By setting 'keep_singletons=False', all singletons in the key and system files
|
| 136 |
+
will be excluded from the evaluation.
|
| 137 |
+
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
|
| 138 |
+
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
|
| 139 |
+
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
|
| 140 |
+
Minimum spans are determined using the MINA algorithm.
|
| 141 |
+
|
| 142 |
+
Returns:
|
| 143 |
+
'mentions': mentions
|
| 144 |
+
'muc': MUC metric [Vilain et al, 1995]
|
| 145 |
+
'bcub': B-cubed [Bagga and Baldwin, 1998]
|
| 146 |
+
'ceafe': CEAFe [Luo et al., 2005]
|
| 147 |
+
'lea': LEA [Moosavi and Strube, 2016]
|
| 148 |
+
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
|
| 149 |
+
|
| 150 |
+
Examples:
|
| 151 |
+
|
| 152 |
+
>>> coval = datasets.load_metric('coval')
|
| 153 |
+
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
|
| 154 |
+
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
|
| 155 |
+
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
|
| 156 |
+
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
|
| 157 |
+
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
|
| 158 |
+
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
|
| 159 |
+
>>> references = [words]
|
| 160 |
+
>>> predictions = [words]
|
| 161 |
+
>>> results = coval.compute(predictions=predictions, references=references)
|
| 162 |
+
>>> print(results) # doctest:+ELLIPSIS
|
| 163 |
+
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
|
| 164 |
+
"""
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    """Parse key (gold) and system CoNLL lines into the cluster structures used by coval's evaluator.

    Args:
        key_lines: gold-annotation lines of one document, CoNLL format.
        sys_lines: system-output lines of the same document, CoNLL format.
        NP_only: if True, restrict evaluation to NP mentions (needs parse column).
        remove_nested: if True, drop coreferring mentions nested inside other mentions.
        keep_singletons: if False, size-one clusters are excluded from the clusters.
        min_span: if True, reduce mentions to automatically detected minimum spans.
        doc: dictionary key under which this single document is stored.

    Returns:
        dict mapping ``doc`` to the tuple
        (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster).
    """
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    # Counters feed the informational log messages below; only one document
    # is processed per call, so they accumulate over a single iteration.
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        # NOTE(review): system clusters are annotated with parse trees taken from
        # the *key* document lines — presumably to use gold parses; TODO confirm.
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    # Mention-to-cluster assignments in both directions, as required by the evaluator.
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Score system output against the gold key with each requested coreference metric.

    Args:
        key_lines: gold document lines, CoNLL format.
        sys_lines: system document lines, CoNLL format.
        metrics: iterable of (name, metric_fn) pairs accepted by coval's
            ``evaluator.evaluate_documents``.
        NP_only, remove_nested, keep_singletons, min_span: mention-extraction
            options, forwarded to ``get_coref_infos``.

    Returns:
        dict mapping "<name>/recall", "<name>/precision" and "<name>/f1" to scores
        in [0, 1]; additionally "conll_score" (average of the MUC, B-cubed and
        CEAFe F1 values, scaled to 0-100) when exactly those three are present.
    """
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        # Bug fix: logger.info treats extra positional args as %-style format
        # arguments; passing several pre-formatted strings with no placeholders
        # in the message raises a logging formatting error on every call.
        # Build a single message string instead.
        logger.info(
            f"{name.ljust(10)} Recall: {recall * 100:.2f}"
            f"  Precision: {precision * 100:.2f}"
            f"  F1: {f1 * 100:.2f}"
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
def check_gold_parse_annotation(key_lines):
    """Return True if the key file carries gold parse annotations.

    Scans data lines (lines not starting with ``#``) and inspects the sixth
    whitespace-separated column: any value other than ``-`` signals a gold
    parse. Scanning stops at the first data line with too few columns
    (returning False) or at the first non-``-`` parse column (returning True).
    """
    for record in key_lines:
        if record.startswith("#"):
            continue  # comment / metadata line: keep scanning
        columns = record.split()
        if len(columns) <= 6:
            return False  # malformed/short data line ends the scan
        if columns[5] != "-":
            return True
    return False
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    """CoVal coreference metric: mentions, MUC, B-cubed, CEAFe, LEA and the
    aggregate CoNLL score, computed from CoNLL-format key/system lines."""

    def _info(self):
        # Metric metadata: both predictions and references are sequences of
        # CoNLL-format lines (strings).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        # All sub-metrics evaluated by CoVal; muc/bcub/ceafe additionally
        # feed the aggregate CoNLL score inside `evaluate`.
        allmetrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            # 'min_span' needs gold parse trees in the reference annotation;
            # fail early if they are absent.
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        # Delegate scoring to the module-level `evaluate` helper.
        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=allmetrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
|
testbed/huggingface__datasets/metrics/cuad/README.md
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Metric Card for CUAD
|
| 2 |
+
|
| 3 |
+
## Metric description
|
| 4 |
+
|
| 5 |
+
This metric wraps the official scoring script for version 1 of the [Contract Understanding Atticus Dataset (CUAD)](https://huggingface.co/datasets/cuad), which is a corpus of more than 13,000 labels in 510 commercial legal contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
|
| 6 |
+
|
| 7 |
+
The CUAD metric computes several scores: [Exact Match](https://huggingface.co/metrics/exact_match), [F1 score](https://huggingface.co/metrics/f1), Area Under the Precision-Recall Curve, [Precision](https://huggingface.co/metrics/precision) at 80% [recall](https://huggingface.co/metrics/recall) and Precision at 90% recall.
|
| 8 |
+
|
| 9 |
+
## How to use
|
| 10 |
+
|
| 11 |
+
The CUAD metric takes two inputs :
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
`predictions`, a list of question-answer dictionaries with the following key-values:
|
| 15 |
+
- `id`: the id of the question-answer pair as given in the references.
|
| 16 |
+
- `prediction_text`: a list of possible texts for the answer, as a list of strings depending on a threshold on the confidence probability of each prediction.
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
`references`: a list of question-answer dictionaries with the following key-values:
|
| 20 |
+
- `id`: the id of the question-answer pair (the same as above).
|
| 21 |
+
- `answers`: a dictionary *in the CUAD dataset format* with the following keys:
|
| 22 |
+
- `text`: a list of possible texts for the answer, as a list of strings.
|
| 23 |
+
- `answer_start`: a list of start positions for the answer, as a list of ints.
|
| 24 |
+
|
| 25 |
+
Note that `answer_start` values are not taken into account to compute the metric.
|
| 26 |
+
|
| 27 |
+
```python
|
| 28 |
+
from datasets import load_metric
|
| 29 |
+
cuad_metric = load_metric("cuad")
|
| 30 |
+
predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
|
| 31 |
+
references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
|
| 32 |
+
results = cuad_metric.compute(predictions=predictions, references=references)
|
| 33 |
+
```
|
| 34 |
+
## Output values
|
| 35 |
+
|
| 36 |
+
The output of the CUAD metric consists of a dictionary that contains one or several of the following metrics:
|
| 37 |
+
|
| 38 |
+
`exact_match`: The normalized answers that exactly match the reference answer, with a range between 0.0 and 1.0 (see [exact match](https://huggingface.co/metrics/exact_match) for more information).
|
| 39 |
+
|
| 40 |
+
`f1`: The harmonic mean of the precision and recall (see [F1 score](https://huggingface.co/metrics/f1) for more information). Its range is between 0.0 and 1.0 -- its lowest possible value is 0, if either the precision or the recall is 0, and its highest possible value is 1.0, which means perfect precision and recall.
|
| 41 |
+
|
| 42 |
+
`aupr`: The Area Under the Precision-Recall curve, with a range between 0.0 and 1.0, with a higher value representing both high recall and high precision, and a low value representing low values for both. See the [Wikipedia article](https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve) for more information.
|
| 43 |
+
|
| 44 |
+
`prec_at_80_recall`: The fraction of true examples among the predicted examples at a recall rate of 80%. Its range is between 0.0 and 1.0. For more information, see [precision](https://huggingface.co/metrics/precision) and [recall](https://huggingface.co/metrics/recall).
|
| 45 |
+
|
| 46 |
+
`prec_at_90_recall`: The fraction of true examples among the predicted examples at a recall rate of 90%. Its range is between 0.0 and 1.0.
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
### Values from popular papers
|
| 50 |
+
The [original CUAD paper](https://arxiv.org/pdf/2103.06268.pdf) reports that a [DeBERTa model](https://huggingface.co/microsoft/deberta-base) attains
|
| 51 |
+
an AUPR of 47.8%, a Precision at 80% Recall of 44.0%, and a Precision at 90% Recall of 17.8% (they do not report F1 or Exact Match separately).
|
| 52 |
+
|
| 53 |
+
For more recent model performance, see the [dataset leaderboard](https://paperswithcode.com/dataset/cuad).
|
| 54 |
+
|
| 55 |
+
## Examples
|
| 56 |
+
|
| 57 |
+
Maximal values :
|
| 58 |
+
|
| 59 |
+
```python
|
| 60 |
+
from datasets import load_metric
|
| 61 |
+
cuad_metric = load_metric("cuad")
|
| 62 |
+
predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
|
| 63 |
+
references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
|
| 64 |
+
results = cuad_metric.compute(predictions=predictions, references=references)
|
| 65 |
+
print(results)
|
| 66 |
+
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
|
| 67 |
+
```
|
| 68 |
+
|
| 69 |
+
Minimal values:
|
| 70 |
+
|
| 71 |
+
```python
|
| 72 |
+
from datasets import load_metric
|
| 73 |
+
cuad_metric = load_metric("cuad")
|
| 74 |
+
predictions = [{'prediction_text': ['The Company appoints the Distributor as an exclusive distributor of Products in the Market, subject to the terms and conditions of this Agreement.'], 'id': 'LIMEENERGYCO_09_09_1999-EX-10-DISTRIBUTOR AGREEMENT__Exclusivity_0'}]
|
| 75 |
+
references = [{'answers': {'answer_start': [143], 'text': 'The seller'}, 'id': 'LIMEENERGYCO_09_09_1999-EX-10-DISTRIBUTOR AGREEMENT__Exclusivity_0'}]
|
| 76 |
+
results = cuad_metric.compute(predictions=predictions, references=references)
|
| 77 |
+
print(results)
|
| 78 |
+
{'exact_match': 0.0, 'f1': 0.0, 'aupr': 0.0, 'prec_at_80_recall': 0, 'prec_at_90_recall': 0}
|
| 79 |
+
```
|
| 80 |
+
|
| 81 |
+
Partial match:
|
| 82 |
+
|
| 83 |
+
```python
|
| 84 |
+
from datasets import load_metric
|
| 85 |
+
cuad_metric = load_metric("cuad")
|
| 86 |
+
predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
|
| 87 |
+
predictions = [{'prediction_text': ['The Company appoints the Distributor as an exclusive distributor of Products in the Market, subject to the terms and conditions of this Agreement.', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
|
| 88 |
+
results = cuad_metric.compute(predictions=predictions, references=references)
|
| 89 |
+
print(results)
|
| 90 |
+
{'exact_match': 100.0, 'f1': 50.0, 'aupr': 0.0, 'prec_at_80_recall': 0, 'prec_at_90_recall': 0}
|
| 91 |
+
```
|
| 92 |
+
|
| 93 |
+
## Limitations and bias
|
| 94 |
+
This metric works only with datasets that have the same format as the [CUAD dataset](https://huggingface.co/datasets/cuad). The limitations and biases of this dataset are not discussed, but it could exhibit annotation bias given the homogeneity of annotators for this dataset.
|
| 95 |
+
|
| 96 |
+
In terms of the metric itself, the accuracy of AUPR has been debated because its estimates are quite noisy and because of the fact that reducing the Precision-Recall Curve to a single number ignores the fact that it is about the tradeoffs between the different systems or performance points plotted and not the performance of an individual system. Reporting the original F1 and exact match scores is therefore useful to ensure a more complete representation of system performance.
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
## Citation
|
| 100 |
+
|
| 101 |
+
```bibtex
|
| 102 |
+
@article{hendrycks2021cuad,
|
| 103 |
+
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
|
| 104 |
+
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
|
| 105 |
+
journal={arXiv preprint arXiv:2103.06268},
|
| 106 |
+
year={2021}
|
| 107 |
+
}
|
| 108 |
+
```
|
| 109 |
+
|
| 110 |
+
## Further References
|
| 111 |
+
|
| 112 |
+
- [CUAD dataset homepage](https://www.atticusprojectai.org/cuad-v1-performance-announcements)
|
testbed/huggingface__datasets/metrics/cuad/cuad.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Datasets Authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
""" CUAD metric. """
|
| 15 |
+
|
| 16 |
+
import datasets
|
| 17 |
+
|
| 18 |
+
from .evaluate import evaluate
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
_CITATION = """\
|
| 22 |
+
@article{hendrycks2021cuad,
|
| 23 |
+
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
|
| 24 |
+
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
|
| 25 |
+
journal={arXiv preprint arXiv:2103.06268},
|
| 26 |
+
year={2021}
|
| 27 |
+
}
|
| 28 |
+
"""
|
| 29 |
+
|
| 30 |
+
# User-facing description shown by `datasets` for this metric.
# Fix: "This metric wrap" -> "This metric wraps" (grammar in displayed text).
_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
|
| 37 |
+
|
| 38 |
+
_KWARGS_DESCRIPTION = """
|
| 39 |
+
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
|
| 40 |
+
Args:
|
| 41 |
+
predictions: List of question-answers dictionaries with the following key-values:
|
| 42 |
+
- 'id': id of the question-answer pair as given in the references (see below)
|
| 43 |
+
- 'prediction_text': list of possible texts for the answer, as a list of strings
|
| 44 |
+
depending on a threshold on the confidence probability of each prediction.
|
| 45 |
+
references: List of question-answers dictionaries with the following key-values:
|
| 46 |
+
- 'id': id of the question-answer pair (see above),
|
| 47 |
+
- 'answers': a Dict in the CUAD dataset format
|
| 48 |
+
{
|
| 49 |
+
'text': list of possible texts for the answer, as a list of strings
|
| 50 |
+
'answer_start': list of start positions for the answer, as a list of ints
|
| 51 |
+
}
|
| 52 |
+
Note that answer_start values are not taken into account to compute the metric.
|
| 53 |
+
Returns:
|
| 54 |
+
'exact_match': Exact match (the normalized answer exactly match the gold answer)
|
| 55 |
+
'f1': The F-score of predicted tokens versus the gold answer
|
| 56 |
+
'aupr': Area Under the Precision-Recall curve
|
| 57 |
+
'prec_at_80_recall': Precision at 80% recall
|
| 58 |
+
'prec_at_90_recall': Precision at 90% recall
|
| 59 |
+
Examples:
|
| 60 |
+
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
|
| 61 |
+
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
|
| 62 |
+
>>> cuad_metric = datasets.load_metric("cuad")
|
| 63 |
+
>>> results = cuad_metric.compute(predictions=predictions, references=references)
|
| 64 |
+
>>> print(results)
|
| 65 |
+
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
|
| 66 |
+
"""
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    """CUAD metric: wraps the official CUAD v1 scoring script (exact match,
    F1, AUPR, and precision at 80%/90% recall)."""

    def _info(self):
        # Metric metadata. Predictions carry an id plus a list of candidate
        # answer texts; references carry an id plus SQuAD-style answers
        # (answer_start is accepted but ignored by the scorer).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        # Index predictions by question id for O(1) lookup in the scorer.
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # Rebuild the SQuAD-like nested structure the official script expects:
        # all references placed in a single paragraph of a single article.
        # Only ids and answer texts are needed (answer_start is dropped).
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
|
testbed/huggingface__datasets/metrics/cuad/evaluate.py
ADDED
|
@@ -0,0 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" Official evaluation script for CUAD dataset. """
|
| 2 |
+
|
| 3 |
+
import argparse
|
| 4 |
+
import json
|
| 5 |
+
import re
|
| 6 |
+
import string
|
| 7 |
+
import sys
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
IOU_THRESH = 0.5
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def get_jaccard(prediction, ground_truth):
    """Token-level Jaccard overlap between a prediction and a ground truth.

    Both strings are normalized the same way before comparison: the
    characters '.', ',', ';', ':' are stripped, text is lowercased, '/' is
    treated as a word separator, and tokens come from splitting on single
    spaces. Returns |intersection| / |union| of the two token sets.
    """

    def _token_set(text):
        for punct in (".", ",", ";", ":"):
            text = text.replace(punct, "")
        return set(text.lower().replace("/", " ").split(" "))

    pred_tokens = _token_set(prediction)
    truth_tokens = _token_set(ground_truth)
    return len(pred_tokens & truth_tokens) / len(pred_tokens | truth_tokens)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace.

    Normalization pipeline (order matters): lowercase, drop all ASCII
    punctuation, blank out the articles a/an/the, then collapse whitespace.
    """
    punctuation = set(string.punctuation)
    lowered = s.lower()
    no_punct = "".join(ch for ch in lowered if ch not in punctuation)
    no_articles = re.sub(r"\b(a|an|the)\b", " ", no_punct)
    return " ".join(no_articles.split())
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def compute_precision_recall(predictions, ground_truths, qa_id):
    """Compute precision and recall for one question.

    A prediction matches a ground truth when their Jaccard overlap reaches
    IOU_THRESH; for questions whose id contains "Parties", a ground truth
    contained verbatim in the prediction also counts as a match.

    Returns:
        (precision, recall); np.nan for either value when its denominator
        (tp+fp or tp+fn) is zero.
    """
    allow_substring = "Parties" in qa_id

    def _is_match(pred, truth):
        if get_jaccard(pred, truth) >= IOU_THRESH:
            return True
        return allow_substring and truth in pred

    true_pos = false_pos = false_neg = 0

    if not ground_truths:
        # Empty answer set: every prediction is a false positive.
        false_pos = len(predictions)
    else:
        # Count each ground truth at most once, so several predictions
        # matching the same answer are not double counted.
        for truth in ground_truths:
            assert len(truth) > 0
            if any(_is_match(pred, truth) for pred in predictions):
                true_pos += 1
            else:
                false_neg += 1

        # Any prediction matching no ground truth is a false positive.
        for pred in predictions:
            if not any(_is_match(pred, truth) for truth in ground_truths):
                false_pos += 1

    precision = true_pos / (true_pos + false_pos) if true_pos + false_pos > 0 else np.nan
    recall = true_pos / (true_pos + false_neg) if true_pos + false_neg > 0 else np.nan

    return precision, recall
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def process_precisions(precisions):
    """Monotonize precisions so precision and recall don't both get worse.

    Assumes the list is sorted in order of increasing recall. Each entry is
    replaced by the maximum precision observed at that recall or any higher
    recall (a running max taken from the right). Returns a new list; the
    input is left untouched.
    """
    smoothed = list(precisions)
    for idx in range(len(smoothed) - 2, -1, -1):
        smoothed[idx] = max(smoothed[idx + 1], smoothed[idx])
    return smoothed
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def get_aupr(precisions, recalls):
    """Area under the precision-recall curve via trapezoidal integration.

    Precisions are monotonized with `process_precisions` first. Returns 0
    when the integral is NaN (e.g. when precision/recall entries are NaN).
    """
    processed_precisions = process_precisions(precisions)
    # np.trapz was renamed to np.trapezoid in NumPy 2.0 (trapz removed);
    # resolve whichever is available so the script runs on old and new NumPy.
    _trapezoid = getattr(np, "trapezoid", None) or np.trapz
    aupr = _trapezoid(processed_precisions, recalls)
    if np.isnan(aupr):
        return 0
    return aupr
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def get_prec_at_recall(precisions, recalls, recall_thresh):
    """Precision at the first recall >= recall_thresh.

    Assumes recalls are sorted in increasing order; precisions are
    monotonized first. Returns 0 when no recall reaches the threshold.
    """
    smoothed = process_precisions(precisions)
    for precision, recall in zip(smoothed, recalls):
        if recall >= recall_thresh:
            return precision
    return 0
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def exact_match_score(prediction, ground_truth):
    """True when prediction and ground truth are identical after normalization."""
    return normalize_answer(ground_truth) == normalize_answer(prediction)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def metric_max_over_ground_truths(metric_fn, predictions, ground_truths):
    """Apply metric_fn over every (prediction, ground_truth) pair in order.

    Stops and returns 1 as soon as any pair scores 1; otherwise returns the
    score of the LAST pair evaluated (0 when either input is empty). Note:
    despite the name, this is "first perfect match, else last score", kept
    for parity with the official script.
    """
    score = 0
    for pred, truth in ((p, t) for p in predictions for t in ground_truths):
        score = metric_fn(pred, truth)
        if score == 1:  # a prediction matched this ground truth perfectly
            return score
    return score
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def evaluate(dataset, predictions):
    """Score CUAD predictions against a SQuAD-style nested dataset.

    Args:
        dataset: list of articles, each with "paragraphs" -> "qas" entries
            carrying an "id" and a list of "answers" ({"text": ...}).
        predictions: dict mapping question id -> list of predicted texts.

    Returns:
        dict with exact_match, f1 (both percentages), aupr, and precision
        at 80% and 90% recall. Questions missing from `predictions` count
        toward the total with score 0 (a warning goes to stderr).
    """
    f1 = exact_match = total = 0
    precisions = []
    recalls = []

    for article in dataset:
        for paragraph in article["paragraphs"]:
            for qa in paragraph["qas"]:
                total += 1
                if qa["id"] not in predictions:
                    message = "Unanswered question " + qa["id"] + " will receive score 0."
                    print(message, file=sys.stderr)
                    continue
                ground_truths = [answer["text"] for answer in qa["answers"]]
                prediction = predictions[qa["id"]]
                precision, recall = compute_precision_recall(prediction, ground_truths, qa["id"])

                precisions.append(precision)
                recalls.append(recall)

                # Guard the harmonic mean against a 0/0 division.
                if precision == 0 and recall == 0:
                    f1 += 0
                else:
                    f1 += 2 * (precision * recall) / (precision + recall)

                exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)

    # Sort the (recall, precision) pairs by recall for the PR-curve metrics.
    precisions = [p for _, p in sorted(zip(recalls, precisions))]
    recalls.sort()

    f1 = 100.0 * f1 / total
    exact_match = 100.0 * exact_match / total
    aupr = get_aupr(precisions, recalls)
    prec_at_90_recall = get_prec_at_recall(precisions, recalls, recall_thresh=0.9)
    prec_at_80_recall = get_prec_at_recall(precisions, recalls, recall_thresh=0.8)

    return {
        "exact_match": exact_match,
        "f1": f1,
        "aupr": aupr,
        "prec_at_80_recall": prec_at_80_recall,
        "prec_at_90_recall": prec_at_90_recall,
    }
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
if __name__ == "__main__":
    # CLI entry point: score a prediction file against a CUAD dataset file
    # and print the resulting metrics as JSON on stdout.
    arg_parser = argparse.ArgumentParser(description="Evaluation for CUAD")
    arg_parser.add_argument("dataset_file", help="Dataset file")
    arg_parser.add_argument("prediction_file", help="Prediction File")
    cli_args = arg_parser.parse_args()

    with open(cli_args.dataset_file) as dataset_file:
        dataset = json.load(dataset_file)["data"]
    with open(cli_args.prediction_file) as prediction_file:
        predictions = json.load(prediction_file)

    print(json.dumps(evaluate(dataset, predictions)))
|
testbed/huggingface__datasets/metrics/exact_match/README.md
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Metric Card for Exact Match
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
## Metric Description
|
| 5 |
+
A given predicted string's exact match score is 1 if it is the exact same as its reference string, and is 0 otherwise.
|
| 6 |
+
|
| 7 |
+
- **Example 1**: The exact match score of prediction "Happy Birthday!" is 0, given its reference is "Happy New Year!".
|
| 8 |
+
- **Example 2**: The exact match score of prediction "The Colour of Magic (1983)" is 1, given its reference is also "The Colour of Magic (1983)".
|
| 9 |
+
|
| 10 |
+
The exact match score of a set of predictions is the sum of all of the individual exact match scores in the set, divided by the total number of predictions in the set.
|
| 11 |
+
|
| 12 |
+
- **Example**: The exact match score of the set {Example 1, Example 2} (above) is 0.5.
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
## How to Use
|
| 16 |
+
At minimum, this metric takes as input predictions and references:
|
| 17 |
+
```python
|
| 18 |
+
>>> from datasets import load_metric
|
| 19 |
+
>>> exact_match_metric = load_metric("exact_match")
|
| 20 |
+
>>> results = exact_match_metric.compute(predictions=predictions, references=references)
|
| 21 |
+
```
|
| 22 |
+
|
| 23 |
+
### Inputs
|
| 24 |
+
- **`predictions`** (`list` of `str`): List of predicted texts.
|
| 25 |
+
- **`references`** (`list` of `str`): List of reference texts.
|
| 26 |
+
- **`regexes_to_ignore`** (`list` of `str`): Regex expressions of characters to ignore when calculating the exact matches. Defaults to `None`. Note: the regex changes are applied before capitalization is normalized.
|
| 27 |
+
- **`ignore_case`** (`bool`): If `True`, turns everything to lowercase so that capitalization differences are ignored. Defaults to `False`.
|
| 28 |
+
- **`ignore_punctuation`** (`bool`): If `True`, removes punctuation before comparing strings. Defaults to `False`.
|
| 29 |
+
- **`ignore_numbers`** (`bool`): If `True`, removes all digits before comparing strings. Defaults to `False`.
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
### Output Values
|
| 33 |
+
This metric outputs a dictionary with one value: the average exact match score.
|
| 34 |
+
|
| 35 |
+
```python
|
| 36 |
+
{'exact_match': 100.0}
|
| 37 |
+
```
|
| 38 |
+
|
| 39 |
+
This metric's range is 0-100, inclusive. Here, 0.0 means no prediction/reference pairs were matches, while 100.0 means they all were.
|
| 40 |
+
|
| 41 |
+
#### Values from Popular Papers
|
| 42 |
+
The exact match metric is often included in other metrics, such as SQuAD. For example, the [original SQuAD paper](https://nlp.stanford.edu/pubs/rajpurkar2016squad.pdf) reported an Exact Match score of 40.0%. They also report that the human performance Exact Match score on the dataset was 80.3%.
|
| 43 |
+
|
| 44 |
+
### Examples
|
| 45 |
+
Without including any regexes to ignore:
|
| 46 |
+
```python
|
| 47 |
+
>>> exact_match = datasets.load_metric("exact_match")
|
| 48 |
+
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
|
| 49 |
+
>>> preds = ["cat?", "theater", "yelling", "agent"]
|
| 50 |
+
>>> results = exact_match.compute(references=refs, predictions=preds)
|
| 51 |
+
>>> print(round(results["exact_match"], 1))
|
| 52 |
+
25.0
|
| 53 |
+
```
|
| 54 |
+
|
| 55 |
+
Ignoring regexes "the" and "yell", as well as ignoring case and punctuation:
|
| 56 |
+
```python
|
| 57 |
+
>>> exact_match = datasets.load_metric("exact_match")
|
| 58 |
+
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
|
| 59 |
+
>>> preds = ["cat?", "theater", "yelling", "agent"]
|
| 60 |
+
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
|
| 61 |
+
>>> print(round(results["exact_match"], 1))
|
| 62 |
+
50.0
|
| 63 |
+
```
|
| 64 |
+
Note that in the example above, because the regexes are ignored before the case is normalized, "yell" from "YELLING" is not deleted.
|
| 65 |
+
|
| 66 |
+
Ignoring "the", "yell", and "YELL", as well as ignoring case and punctuation:
|
| 67 |
+
```python
|
| 68 |
+
>>> exact_match = datasets.load_metric("exact_match")
|
| 69 |
+
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
|
| 70 |
+
>>> preds = ["cat?", "theater", "yelling", "agent"]
|
| 71 |
+
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
|
| 72 |
+
>>> print(round(results["exact_match"], 1))
|
| 73 |
+
75.0
|
| 74 |
+
```
|
| 75 |
+
|
| 76 |
+
Ignoring "the", "yell", and "YELL", as well as ignoring case, punctuation, and numbers:
|
| 77 |
+
```python
|
| 78 |
+
>>> exact_match = datasets.load_metric("exact_match")
|
| 79 |
+
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
|
| 80 |
+
>>> preds = ["cat?", "theater", "yelling", "agent"]
|
| 81 |
+
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
|
| 82 |
+
>>> print(round(results["exact_match"], 1))
|
| 83 |
+
100.0
|
| 84 |
+
```
|
| 85 |
+
|
| 86 |
+
An example that includes sentences:
|
| 87 |
+
```python
|
| 88 |
+
>>> exact_match = datasets.load_metric("exact_match")
|
| 89 |
+
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
|
| 90 |
+
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
|
| 91 |
+
>>> results = exact_match.compute(references=refs, predictions=preds)
|
| 92 |
+
>>> print(round(results["exact_match"], 1))
|
| 93 |
+
33.3
|
| 94 |
+
```
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
## Limitations and Bias
|
| 98 |
+
This metric is limited in that it outputs the same score for something that is completely wrong as for something that is correct except for a single character. In other words, there is no award for being *almost* right.
|
| 99 |
+
|
| 100 |
+
## Citation
|
| 101 |
+
|
| 102 |
+
## Further References
|
| 103 |
+
- Also used in the [SQuAD metric](https://github.com/huggingface/datasets/tree/main/metrics/squad)
|
testbed/huggingface__datasets/metrics/exact_match/exact_match.py
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Exact Match metric."""
|
| 15 |
+
import re
|
| 16 |
+
import string
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
+
|
| 20 |
+
import datasets
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
_DESCRIPTION = """
|
| 24 |
+
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
|
| 25 |
+
"""
|
| 26 |
+
|
| 27 |
+
_KWARGS_DESCRIPTION = """
|
| 28 |
+
Args:
|
| 29 |
+
predictions: List of predicted texts.
|
| 30 |
+
references: List of reference texts.
|
| 31 |
+
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
|
| 32 |
+
ignore when calculating the exact matches. Note: these regexes are removed
|
| 33 |
+
from the input data before the changes based on the options below (e.g. ignore_case,
|
| 34 |
+
ignore_punctuation, ignore_numbers) are applied.
|
| 35 |
+
ignore_case: Boolean, defaults to False. If true, turns everything
|
| 36 |
+
to lowercase so that capitalization differences are ignored.
|
| 37 |
+
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
|
| 38 |
+
comparing predictions and references.
|
| 39 |
+
ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
|
| 40 |
+
comparing predictions and references.
|
| 41 |
+
Returns:
|
| 42 |
+
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
|
| 43 |
+
Examples:
|
| 44 |
+
>>> exact_match = datasets.load_metric("exact_match")
|
| 45 |
+
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
|
| 46 |
+
>>> preds = ["cat?", "theater", "yelling", "agent"]
|
| 47 |
+
>>> results = exact_match.compute(references=refs, predictions=preds)
|
| 48 |
+
>>> print(round(results["exact_match"], 1))
|
| 49 |
+
25.0
|
| 50 |
+
|
| 51 |
+
>>> exact_match = datasets.load_metric("exact_match")
|
| 52 |
+
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
|
| 53 |
+
>>> preds = ["cat?", "theater", "yelling", "agent"]
|
| 54 |
+
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
|
| 55 |
+
>>> print(round(results["exact_match"], 1))
|
| 56 |
+
50.0
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
>>> exact_match = datasets.load_metric("exact_match")
|
| 60 |
+
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
|
| 61 |
+
>>> preds = ["cat?", "theater", "yelling", "agent"]
|
| 62 |
+
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
|
| 63 |
+
>>> print(round(results["exact_match"], 1))
|
| 64 |
+
75.0
|
| 65 |
+
|
| 66 |
+
>>> exact_match = datasets.load_metric("exact_match")
|
| 67 |
+
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
|
| 68 |
+
>>> preds = ["cat?", "theater", "yelling", "agent"]
|
| 69 |
+
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
|
| 70 |
+
>>> print(round(results["exact_match"], 1))
|
| 71 |
+
100.0
|
| 72 |
+
|
| 73 |
+
>>> exact_match = datasets.load_metric("exact_match")
|
| 74 |
+
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
|
| 75 |
+
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
|
| 76 |
+
>>> results = exact_match.compute(references=refs, predictions=preds)
|
| 77 |
+
>>> print(round(results["exact_match"], 1))
|
| 78 |
+
33.3
|
| 79 |
+
|
| 80 |
+
"""
|
| 81 |
+
|
| 82 |
+
_CITATION = """
|
| 83 |
+
"""
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    """Exact-match rate between predictions and references, as a percentage in [0.0, 100.0]."""

    def _info(self):
        """Declare metric metadata: one prediction string and one reference string per example."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Compute the exact-match rate after applying the requested normalizations.

        Regex removal happens first, then (optionally) lowercasing, punctuation
        stripping, and digit stripping — matching the order documented in
        ``_KWARGS_DESCRIPTION``.
        """
        if regexes_to_ignore is None:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        else:
            # Strip every ignored pattern from both sides before comparing.
            for pattern in regexes_to_ignore:
                predictions = np.array([re.sub(pattern, "", text) for text in predictions])
                references = np.array([re.sub(pattern, "", text) for text in references])

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            # Translation table that deletes every punctuation character.
            table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=table)
            references = np.char.translate(references, table=table)

        if ignore_numbers:
            # Translation table that deletes every decimal digit.
            table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=table)
            references = np.char.translate(references, table=table)

        # Element-wise equality over the normalized arrays; mean gives the match rate.
        matches = predictions == references
        return {"exact_match": np.mean(matches) * 100}
|
testbed/huggingface__datasets/metrics/f1/README.md
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Metric Card for F1
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
## Metric Description
|
| 5 |
+
|
| 6 |
+
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
|
| 7 |
+
F1 = 2 * (precision * recall) / (precision + recall)
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
## How to Use
|
| 11 |
+
|
| 12 |
+
At minimum, this metric requires predictions and references as input
|
| 13 |
+
|
| 14 |
+
```python
|
| 15 |
+
>>> f1_metric = datasets.load_metric("f1")
|
| 16 |
+
>>> results = f1_metric.compute(predictions=[0, 1], references=[0, 1])
|
| 17 |
+
>>> print(results)
|
| 18 |
+
{'f1': 1.0}
|
| 19 |
+
```
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
### Inputs
|
| 23 |
+
- **predictions** (`list` of `int`): Predicted labels.
|
| 24 |
+
- **references** (`list` of `int`): Ground truth labels.
|
| 25 |
+
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
|
| 26 |
+
- **pos_label** (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
|
| 27 |
+
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
|
| 28 |
+
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
|
| 29 |
+
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
|
| 30 |
+
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
|
| 31 |
+
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
|
| 32 |
+
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
|
| 33 |
+
- **sample_weight** (`list` of `float`): Sample weights. Defaults to None.
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
### Output Values
|
| 37 |
+
- **f1**(`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
|
| 38 |
+
|
| 39 |
+
Output Example(s):
|
| 40 |
+
```python
|
| 41 |
+
{'f1': 0.26666666666666666}
|
| 42 |
+
```
|
| 43 |
+
```python
|
| 44 |
+
{'f1': array([0.8, 0.0, 0.0])}
|
| 45 |
+
```
|
| 46 |
+
|
| 47 |
+
This metric outputs a dictionary, with either a single f1 score, of type `float`, or an array of f1 scores, with entries of type `float`.
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
#### Values from Popular Papers
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
### Examples
|
| 56 |
+
|
| 57 |
+
Example 1-A simple binary example
|
| 58 |
+
```python
|
| 59 |
+
>>> f1_metric = datasets.load_metric("f1")
|
| 60 |
+
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
|
| 61 |
+
>>> print(results)
|
| 62 |
+
{'f1': 0.5}
|
| 63 |
+
```
|
| 64 |
+
|
| 65 |
+
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
|
| 66 |
+
```python
|
| 67 |
+
>>> f1_metric = datasets.load_metric("f1")
|
| 68 |
+
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
|
| 69 |
+
>>> print(round(results['f1'], 2))
|
| 70 |
+
0.67
|
| 71 |
+
```
|
| 72 |
+
|
| 73 |
+
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
|
| 74 |
+
```python
|
| 75 |
+
>>> f1_metric = datasets.load_metric("f1")
|
| 76 |
+
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
|
| 77 |
+
>>> print(round(results['f1'], 2))
|
| 78 |
+
0.35
|
| 79 |
+
```
|
| 80 |
+
|
| 81 |
+
Example 4-A multiclass example, with different values for the `average` input.
|
| 82 |
+
```python
|
| 83 |
+
>>> predictions = [0, 2, 1, 0, 0, 1]
|
| 84 |
+
>>> references = [0, 1, 2, 0, 1, 2]
|
| 85 |
+
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
|
| 86 |
+
>>> print(round(results['f1'], 2))
|
| 87 |
+
0.27
|
| 88 |
+
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
|
| 89 |
+
>>> print(round(results['f1'], 2))
|
| 90 |
+
0.33
|
| 91 |
+
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
|
| 92 |
+
>>> print(round(results['f1'], 2))
|
| 93 |
+
0.27
|
| 94 |
+
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
|
| 95 |
+
>>> print(results)
|
| 96 |
+
{'f1': array([0.8, 0. , 0. ])}
|
| 97 |
+
```
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
## Limitations and Bias
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
## Citation(s)
|
| 105 |
+
```bibtex
|
| 106 |
+
@article{scikit-learn,
|
| 107 |
+
title={Scikit-learn: Machine Learning in {P}ython},
|
| 108 |
+
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
|
| 109 |
+
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
|
| 110 |
+
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
|
| 111 |
+
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
|
| 112 |
+
journal={Journal of Machine Learning Research},
|
| 113 |
+
volume={12},
|
| 114 |
+
pages={2825--2830},
|
| 115 |
+
year={2011}
|
| 116 |
+
}
|
| 117 |
+
```
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
## Further References
|
testbed/huggingface__datasets/metrics/f1/f1.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""F1 metric."""
|
| 15 |
+
|
| 16 |
+
from sklearn.metrics import f1_score
|
| 17 |
+
|
| 18 |
+
import datasets
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
_DESCRIPTION = """
|
| 22 |
+
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
|
| 23 |
+
F1 = 2 * (precision * recall) / (precision + recall)
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
_KWARGS_DESCRIPTION = """
|
| 28 |
+
Args:
|
| 29 |
+
predictions (`list` of `int`): Predicted labels.
|
| 30 |
+
references (`list` of `int`): Ground truth labels.
|
| 31 |
+
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
|
| 32 |
+
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
|
| 33 |
+
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
|
| 34 |
+
|
| 35 |
+
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
|
| 36 |
+
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
|
| 37 |
+
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
|
| 38 |
+
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
|
| 39 |
+
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
|
| 40 |
+
sample_weight (`list` of `float`): Sample weights Defaults to None.
|
| 41 |
+
|
| 42 |
+
Returns:
|
| 43 |
+
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
|
| 44 |
+
|
| 45 |
+
Examples:
|
| 46 |
+
|
| 47 |
+
Example 1-A simple binary example
|
| 48 |
+
>>> f1_metric = datasets.load_metric("f1")
|
| 49 |
+
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
|
| 50 |
+
>>> print(results)
|
| 51 |
+
{'f1': 0.5}
|
| 52 |
+
|
| 53 |
+
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
|
| 54 |
+
>>> f1_metric = datasets.load_metric("f1")
|
| 55 |
+
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
|
| 56 |
+
>>> print(round(results['f1'], 2))
|
| 57 |
+
0.67
|
| 58 |
+
|
| 59 |
+
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
|
| 60 |
+
>>> f1_metric = datasets.load_metric("f1")
|
| 61 |
+
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
|
| 62 |
+
>>> print(round(results['f1'], 2))
|
| 63 |
+
0.35
|
| 64 |
+
|
| 65 |
+
Example 4-A multiclass example, with different values for the `average` input.
|
| 66 |
+
>>> predictions = [0, 2, 1, 0, 0, 1]
|
| 67 |
+
>>> references = [0, 1, 2, 0, 1, 2]
|
| 68 |
+
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
|
| 69 |
+
>>> print(round(results['f1'], 2))
|
| 70 |
+
0.27
|
| 71 |
+
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
|
| 72 |
+
>>> print(round(results['f1'], 2))
|
| 73 |
+
0.33
|
| 74 |
+
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
|
| 75 |
+
>>> print(round(results['f1'], 2))
|
| 76 |
+
0.27
|
| 77 |
+
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
|
| 78 |
+
>>> print(results)
|
| 79 |
+
{'f1': array([0.8, 0. , 0. ])}
|
| 80 |
+
"""
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
_CITATION = """
|
| 84 |
+
@article{scikit-learn,
|
| 85 |
+
title={Scikit-learn: Machine Learning in {P}ython},
|
| 86 |
+
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
|
| 87 |
+
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
|
| 88 |
+
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
|
| 89 |
+
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
|
| 90 |
+
journal={Journal of Machine Learning Research},
|
| 91 |
+
volume={12},
|
| 92 |
+
pages={2825--2830},
|
| 93 |
+
year={2011}
|
| 94 |
+
}
|
| 95 |
+
"""
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    """F1 metric backed by ``sklearn.metrics.f1_score``."""

    def _info(self):
        """Declare metric metadata; the input schema depends on the "multilabel" config name."""
        if self.config_name == "multilabel":
            # Multilabel inputs: one sequence of int labels per example.
            feature_schema = {
                "predictions": datasets.Sequence(datasets.Value("int32")),
                "references": datasets.Sequence(datasets.Value("int32")),
            }
        else:
            # Default: a single int label per example.
            feature_schema = {
                "predictions": datasets.Value("int32"),
                "references": datasets.Value("int32"),
            }
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(feature_schema),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """Return ``{"f1": score}`` — a plain float for averaged scores, an array when ``average`` is None."""
        result = f1_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
        )
        # Single-element results are unwrapped to a builtin float for cleaner output.
        if result.size == 1:
            result = float(result)
        return {"f1": result}
|
testbed/huggingface__datasets/metrics/frugalscore/README.md
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Metric Card for FrugalScore
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
## Metric Description
|
| 5 |
+
FrugalScore is a reference-based metric for Natural Language Generation (NLG) model evaluation. It is based on a distillation approach that allows to learn a fixed, low cost version of any expensive NLG metric, while retaining most of its original performance.
|
| 6 |
+
|
| 7 |
+
The FrugalScore models are obtained by continuing the pretraining of small models on a synthetic dataset constructed using summarization, backtranslation and denoising models. During the training, the small models learn the internal mapping of the expensive metric, including any similarity function.
|
| 8 |
+
|
| 9 |
+
## How to use
|
| 10 |
+
|
| 11 |
+
When loading FrugalScore, you can indicate the model you wish to use to compute the score. The default model is `moussaKam/frugalscore_tiny_bert-base_bert-score`, and a full list of models can be found in the [Limitations and bias](#Limitations-and-bias) section.
|
| 12 |
+
|
| 13 |
+
```python
|
| 14 |
+
>>> from datasets import load_metric
|
| 15 |
+
>>> frugalscore = load_metric("frugalscore", "moussaKam/frugalscore_medium_bert-base_mover-score")
|
| 16 |
+
```
|
| 17 |
+
|
| 18 |
+
FrugalScore calculates how good the predictions are given some references, based on a set of scores.
|
| 19 |
+
|
| 20 |
+
The inputs it takes are:
|
| 21 |
+
|
| 22 |
+
`predictions`: a list of strings representing the predictions to score.
|
| 23 |
+
|
| 24 |
+
`references`: a list of strings representing the references for each prediction.
|
| 25 |
+
|
| 26 |
+
Its optional arguments are:
|
| 27 |
+
|
| 28 |
+
`batch_size`: the batch size for predictions (default value is `32`).
|
| 29 |
+
|
| 30 |
+
`max_length`: the maximum sequence length (default value is `128`).
|
| 31 |
+
|
| 32 |
+
`device`: either "gpu" or "cpu" (default value is `None`).
|
| 33 |
+
|
| 34 |
+
```python
|
| 35 |
+
>>> results = frugalscore.compute(predictions=['hello there', 'huggingface'], references=['hello world', 'hugging face'], batch_size=16, max_length=64, device="gpu")
|
| 36 |
+
```
|
| 37 |
+
|
| 38 |
+
## Output values
|
| 39 |
+
|
| 40 |
+
The output of FrugalScore is a dictionary with the list of scores for each prediction-reference pair:
|
| 41 |
+
```python
|
| 42 |
+
{'scores': [0.6307541, 0.6449357]}
|
| 43 |
+
```
|
| 44 |
+
|
| 45 |
+
### Values from popular papers
|
| 46 |
+
The [original FrugalScore paper](https://arxiv.org/abs/2110.08559) reported that FrugalScore-Tiny retains 97.7/94.7% of the original performance compared to [BertScore](https://huggingface.co/metrics/bertscore) while running 54 times faster and having 84 times less parameters.
|
| 47 |
+
|
| 48 |
+
## Examples
|
| 49 |
+
|
| 50 |
+
Maximal values (exact match between `references` and `predictions`):
|
| 51 |
+
|
| 52 |
+
```python
|
| 53 |
+
>>> from datasets import load_metric
|
| 54 |
+
>>> frugalscore = load_metric("frugalscore")
|
| 55 |
+
>>> results = frugalscore.compute(predictions=['hello world'], references=['hello world'])
|
| 56 |
+
>>> print(results)
|
| 57 |
+
{'scores': [0.9891098]}
|
| 58 |
+
```
|
| 59 |
+
|
| 60 |
+
Partial values:
|
| 61 |
+
|
| 62 |
+
```python
|
| 63 |
+
>>> from datasets import load_metric
|
| 64 |
+
>>> frugalscore = load_metric("frugalscore")
|
| 65 |
+
>>> results = frugalscore.compute(predictions=['hello world'], references=['hugging face'])
|
| 66 |
+
>>> print(results)
|
| 67 |
+
{'scores': [0.42482382]}
|
| 68 |
+
```
|
| 69 |
+
|
| 70 |
+
## Limitations and bias
|
| 71 |
+
|
| 72 |
+
FrugalScore is based on [BertScore](https://huggingface.co/metrics/bertscore) and [MoverScore](https://arxiv.org/abs/1909.02622), and the models used are based on the original models used for these scores.
|
| 73 |
+
|
| 74 |
+
The full list of available models for FrugalScore is:
|
| 75 |
+
|
| 76 |
+
| FrugalScore | Student | Teacher | Method |
|
| 77 |
+
|----------------------------------------------------|-------------|----------------|------------|
|
| 78 |
+
| [moussaKam/frugalscore_tiny_bert-base_bert-score](https://huggingface.co/moussaKam/frugalscore_tiny_bert-base_bert-score) | BERT-tiny | BERT-Base | BERTScore |
|
| 79 |
+
| [moussaKam/frugalscore_small_bert-base_bert-score](https://huggingface.co/moussaKam/frugalscore_small_bert-base_bert-score) | BERT-small | BERT-Base | BERTScore |
|
| 80 |
+
| [moussaKam/frugalscore_medium_bert-base_bert-score](https://huggingface.co/moussaKam/frugalscore_medium_bert-base_bert-score) | BERT-medium | BERT-Base | BERTScore |
|
| 81 |
+
| [moussaKam/frugalscore_tiny_roberta_bert-score](https://huggingface.co/moussaKam/frugalscore_tiny_roberta_bert-score) | BERT-tiny | RoBERTa-Large | BERTScore |
|
| 82 |
+
| [moussaKam/frugalscore_small_roberta_bert-score](https://huggingface.co/moussaKam/frugalscore_small_roberta_bert-score) | BERT-small | RoBERTa-Large | BERTScore |
|
| 83 |
+
| [moussaKam/frugalscore_medium_roberta_bert-score](https://huggingface.co/moussaKam/frugalscore_medium_roberta_bert-score) | BERT-medium | RoBERTa-Large | BERTScore |
|
| 84 |
+
| [moussaKam/frugalscore_tiny_deberta_bert-score](https://huggingface.co/moussaKam/frugalscore_tiny_deberta_bert-score) | BERT-tiny | DeBERTa-XLarge | BERTScore |
|
| 85 |
+
| [moussaKam/frugalscore_small_deberta_bert-score](https://huggingface.co/moussaKam/frugalscore_small_deberta_bert-score) | BERT-small | DeBERTa-XLarge | BERTScore |
|
| 86 |
+
| [moussaKam/frugalscore_medium_deberta_bert-score](https://huggingface.co/moussaKam/frugalscore_medium_deberta_bert-score) | BERT-medium | DeBERTa-XLarge | BERTScore |
|
| 87 |
+
| [moussaKam/frugalscore_tiny_bert-base_mover-score](https://huggingface.co/moussaKam/frugalscore_tiny_bert-base_mover-score) | BERT-tiny | BERT-Base | MoverScore |
|
| 88 |
+
| [moussaKam/frugalscore_small_bert-base_mover-score](https://huggingface.co/moussaKam/frugalscore_small_bert-base_mover-score) | BERT-small | BERT-Base | MoverScore |
|
| 89 |
+
| [moussaKam/frugalscore_medium_bert-base_mover-score](https://huggingface.co/moussaKam/frugalscore_medium_bert-base_mover-score) | BERT-medium | BERT-Base | MoverScore |
|
| 90 |
+
|
| 91 |
+
Depending on the size of the model picked, the loading time will vary: the `tiny` models will load very quickly, whereas the `medium` ones can take several minutes, depending on your Internet connection.
|
| 92 |
+
|
| 93 |
+
## Citation
|
| 94 |
+
```bibtex
|
| 95 |
+
@article{eddine2021frugalscore,
|
| 96 |
+
title={FrugalScore: Learning Cheaper, Lighter and Faster Evaluation Metrics for Automatic Text Generation},
|
| 97 |
+
author={Eddine, Moussa Kamal and Shang, Guokan and Tixier, Antoine J-P and Vazirgiannis, Michalis},
|
| 98 |
+
journal={arXiv preprint arXiv:2110.08559},
|
| 99 |
+
year={2021}
|
| 100 |
+
}
|
| 101 |
+
```
|
| 102 |
+
|
| 103 |
+
## Further References
|
| 104 |
+
- [Original FrugalScore code](https://github.com/moussaKam/FrugalScore)
|
| 105 |
+
- [FrugalScore paper](https://arxiv.org/abs/2110.08559)
|
testbed/huggingface__datasets/metrics/frugalscore/frugalscore.py
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2022 The HuggingFace Datasets Authors and the current metric script contributor.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""FrugalScore metric."""
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments
|
| 18 |
+
|
| 19 |
+
import datasets
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
_CITATION = """\
|
| 23 |
+
@article{eddine2021frugalscore,
|
| 24 |
+
title={FrugalScore: Learning Cheaper, Lighter and Faster Evaluation Metrics for Automatic Text Generation},
|
| 25 |
+
author={Eddine, Moussa Kamal and Shang, Guokan and Tixier, Antoine J-P and Vazirgiannis, Michalis},
|
| 26 |
+
journal={arXiv preprint arXiv:2110.08559},
|
| 27 |
+
year={2021}
|
| 28 |
+
}
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
_DESCRIPTION = """\
|
| 32 |
+
FrugalScore is a reference-based metric for NLG models evaluation. It is based on a distillation approach that allows to learn a fixed, low cost version of any expensive NLG metric, while retaining most of its original performance.
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
_KWARGS_DESCRIPTION = """
|
| 37 |
+
Calculates how good are predictions given some references, using certain scores.
|
| 38 |
+
Args:
|
| 39 |
+
predictions (list of str): list of predictions to score. Each predictions
|
| 40 |
+
should be a string.
|
| 41 |
+
references (list of str): list of reference for each prediction. Each
|
| 42 |
+
reference should be a string.
|
| 43 |
+
batch_size (int): the batch size for predictions.
|
| 44 |
+
max_length (int): maximum sequence length.
|
| 45 |
+
device (str): either gpu or cpu
|
| 46 |
+
Returns:
|
| 47 |
+
scores (list of int): list of scores.
|
| 48 |
+
Examples:
|
| 49 |
+
>>> frugalscore = datasets.load_metric("frugalscore")
|
| 50 |
+
>>> results = frugalscore.compute(predictions=['hello there', 'huggingface'], references=['hello world', 'hugging face'])
|
| 51 |
+
>>> print([round(s, 3) for s in results["scores"]])
|
| 52 |
+
[0.631, 0.645]
|
| 53 |
+
"""
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class FRUGALSCORE(datasets.Metric):
    def _info(self):
        # Declare the metric's input schema (two string columns) and metadata.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/moussaKam/FrugalScore",
        )

    def _download_and_prepare(self, dl_manager):
        """Load the distilled scoring model and its tokenizer.

        The metric's config name selects the checkpoint; "default" maps to
        the tiny BERT model distilled from BERTScore.
        """
        if self.config_name == "default":
            checkpoint = "moussaKam/frugalscore_tiny_bert-base_bert-score"
        else:
            checkpoint = self.config_name
        self.model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
        self.tokenizer = AutoTokenizer.from_pretrained(checkpoint)

    def _compute(
        self,
        predictions,
        references,
        batch_size=32,
        max_length=128,
        device=None,
    ):
        """Score each (prediction, reference) pair with the distilled model.

        Args:
            predictions (list of str): sentences to score.
            references (list of str): one reference per prediction.
            batch_size (int): per-device evaluation batch size.
            max_length (int): maximum tokenized sequence length.
            device (str, optional): "gpu" or "cpu"; auto-detected when None.

        Returns:
            dict: {"scores": list of float}, one score per input pair.

        Raises:
            ValueError: if the input lists differ in length or `device` is
                not one of "gpu"/"cpu".
        """
        # Validate with real exceptions instead of `assert`, so the checks
        # survive running Python with -O.
        if len(predictions) != len(references):
            raise ValueError("predictions and references should have the same number of sentences.")
        if device is not None:
            if device not in ["gpu", "cpu"]:
                raise ValueError("device should be either gpu or cpu.")
        else:
            device = "gpu" if torch.cuda.is_available() else "cpu"
        training_args = TrainingArguments(
            "trainer",
            fp16=(device == "gpu"),
            per_device_eval_batch_size=batch_size,
            report_to="all",
            no_cuda=(device == "cpu"),
            log_level="warning",
        )
        dataset = {"sentence1": predictions, "sentence2": references}
        raw_datasets = datasets.Dataset.from_dict(dataset)

        def tokenize_function(data):
            return self.tokenizer(
                data["sentence1"], data["sentence2"], max_length=max_length, truncation=True, padding=True
            )

        tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
        # BUGFIX: `remove_columns` returns a new dataset rather than mutating
        # in place; the original code discarded the result, so the raw text
        # columns were never dropped before prediction.
        tokenized_datasets = tokenized_datasets.remove_columns(["sentence1", "sentence2"])
        trainer = Trainer(self.model, training_args, tokenizer=self.tokenizer)
        predictions = trainer.predict(tokenized_datasets)
        return {"scores": list(predictions.predictions.squeeze(-1))}
|
testbed/huggingface__datasets/metrics/glue/README.md
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Metric Card for GLUE
|
| 2 |
+
|
| 3 |
+
## Metric description
|
| 4 |
+
This metric is used to compute the GLUE evaluation metric associated to each [GLUE dataset](https://huggingface.co/datasets/glue).
|
| 5 |
+
|
| 6 |
+
GLUE, the General Language Understanding Evaluation benchmark is a collection of resources for training, evaluating, and analyzing natural language understanding systems.
|
| 7 |
+
|
| 8 |
+
## How to use
|
| 9 |
+
|
| 10 |
+
There are two steps: (1) loading the GLUE metric relevant to the subset of the GLUE dataset being used for evaluation; and (2) calculating the metric.
|
| 11 |
+
|
| 12 |
+
1. **Loading the relevant GLUE metric** : the subsets of GLUE are the following: `sst2`, `mnli`, `mnli_mismatched`, `mnli_matched`, `qnli`, `rte`, `wnli`, `cola`,`stsb`, `mrpc`, `qqp`, and `hans`.
|
| 13 |
+
|
| 14 |
+
More information about the different subsets of the GLUE dataset can be found on the [GLUE dataset page](https://huggingface.co/datasets/glue).
|
| 15 |
+
|
| 16 |
+
2. **Calculating the metric**: the metric takes two inputs: one list of model predictions to score and one list of references, one per prediction.
|
| 17 |
+
|
| 18 |
+
```python
|
| 19 |
+
from datasets import load_metric
|
| 20 |
+
glue_metric = load_metric('glue', 'sst2')
|
| 21 |
+
references = [0, 1]
|
| 22 |
+
predictions = [0, 1]
|
| 23 |
+
results = glue_metric.compute(predictions=predictions, references=references)
|
| 24 |
+
```
|
| 25 |
+
## Output values
|
| 26 |
+
|
| 27 |
+
The output of the metric depends on the GLUE subset chosen, consisting of a dictionary that contains one or several of the following metrics:
|
| 28 |
+
|
| 29 |
+
`accuracy`: the proportion of correct predictions among the total number of cases processed, with a range between 0 and 1 (see [accuracy](https://huggingface.co/metrics/accuracy) for more information).
|
| 30 |
+
|
| 31 |
+
`f1`: the harmonic mean of the precision and recall (see [F1 score](https://huggingface.co/metrics/f1) for more information). Its range is 0-1 -- its lowest possible value is 0, if either the precision or the recall is 0, and its highest possible value is 1.0, which means perfect precision and recall.
|
| 32 |
+
|
| 33 |
+
`pearson`: a measure of the linear relationship between two datasets (see [Pearson correlation](https://huggingface.co/metrics/pearsonr) for more information). Its range is between -1 and +1, with 0 implying no correlation, and -1/+1 implying an exact linear relationship. Positive correlations imply that as x increases, so does y, whereas negative correlations imply that as x increases, y decreases.
|
| 34 |
+
|
| 35 |
+
`spearmanr`: a nonparametric measure of the monotonicity of the relationship between two datasets(see [Spearman Correlation](https://huggingface.co/metrics/spearmanr) for more information). `spearmanr` has the same range as `pearson`.
|
| 36 |
+
|
| 37 |
+
`matthews_correlation`: a measure of the quality of binary and multiclass classifications (see [Matthews Correlation](https://huggingface.co/metrics/matthews_correlation) for more information). Its range of values is between -1 and +1, where a coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction.
|
| 38 |
+
|
| 39 |
+
The `cola` subset returns `matthews_correlation`, the `stsb` subset returns `pearson` and `spearmanr`, the `mrpc` and `qqp` subsets return both `accuracy` and `f1`, and all other subsets of GLUE return only accuracy.
|
| 40 |
+
|
| 41 |
+
### Values from popular papers
|
| 42 |
+
The [original GLUE paper](https://arxiv.org/abs/1804.07461) reported average scores ranging from 58 to 64%, depending on the model used (with all evaluation values scaled by 100 to make computing the average possible).
|
| 43 |
+
|
| 44 |
+
For more recent model performance, see the [dataset leaderboard](https://paperswithcode.com/dataset/glue).
|
| 45 |
+
|
| 46 |
+
## Examples
|
| 47 |
+
|
| 48 |
+
Maximal values for the MRPC subset (which outputs `accuracy` and `f1`):
|
| 49 |
+
|
| 50 |
+
```python
|
| 51 |
+
from datasets import load_metric
|
| 52 |
+
glue_metric = load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
|
| 53 |
+
references = [0, 1]
|
| 54 |
+
predictions = [0, 1]
|
| 55 |
+
results = glue_metric.compute(predictions=predictions, references=references)
|
| 56 |
+
print(results)
|
| 57 |
+
{'accuracy': 1.0, 'f1': 1.0}
|
| 58 |
+
```
|
| 59 |
+
|
| 60 |
+
Minimal values for the STSB subset (which outputs `pearson` and `spearmanr`):
|
| 61 |
+
|
| 62 |
+
```python
|
| 63 |
+
from datasets import load_metric
|
| 64 |
+
glue_metric = load_metric('glue', 'stsb')
|
| 65 |
+
references = [0., 1., 2., 3., 4., 5.]
|
| 66 |
+
predictions = [-10., -11., -12., -13., -14., -15.]
|
| 67 |
+
results = glue_metric.compute(predictions=predictions, references=references)
|
| 68 |
+
print(results)
|
| 69 |
+
{'pearson': -1.0, 'spearmanr': -1.0}
|
| 70 |
+
```
|
| 71 |
+
|
| 72 |
+
Partial match for the COLA subset (which outputs `matthews_correlation`)
|
| 73 |
+
|
| 74 |
+
```python
|
| 75 |
+
from datasets import load_metric
|
| 76 |
+
glue_metric = load_metric('glue', 'cola')
|
| 77 |
+
references = [0, 1]
|
| 78 |
+
predictions = [1, 1]
|
| 79 |
+
results = glue_metric.compute(predictions=predictions, references=references)
|
| 80 |
+
results
|
| 81 |
+
{'matthews_correlation': 0.0}
|
| 82 |
+
```
|
| 83 |
+
|
| 84 |
+
## Limitations and bias
|
| 85 |
+
This metric works only with datasets that have the same format as the [GLUE dataset](https://huggingface.co/datasets/glue).
|
| 86 |
+
|
| 87 |
+
While the GLUE dataset is meant to represent "General Language Understanding", the tasks represented in it are not necessarily representative of language understanding, and should not be interpreted as such.
|
| 88 |
+
|
| 89 |
+
Also, while the GLUE subtasks were considered challenging during its creation in 2019, they are no longer considered as such given the impressive progress made since then. A more complex (or "stickier") version of it, called [SuperGLUE](https://huggingface.co/datasets/super_glue), was subsequently created.
|
| 90 |
+
|
| 91 |
+
## Citation
|
| 92 |
+
|
| 93 |
+
```bibtex
|
| 94 |
+
@inproceedings{wang2019glue,
|
| 95 |
+
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
|
| 96 |
+
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
|
| 97 |
+
note={In the Proceedings of ICLR.},
|
| 98 |
+
year={2019}
|
| 99 |
+
}
|
| 100 |
+
```
|
| 101 |
+
|
| 102 |
+
## Further References
|
| 103 |
+
|
| 104 |
+
- [GLUE benchmark homepage](https://gluebenchmark.com/)
|
| 105 |
+
- [Fine-tuning a model with the Trainer API](https://huggingface.co/course/chapter3/3?)
|
testbed/huggingface__datasets/metrics/glue/glue.py
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Datasets Authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
""" GLUE benchmark metric. """
|
| 15 |
+
|
| 16 |
+
from scipy.stats import pearsonr, spearmanr
|
| 17 |
+
from sklearn.metrics import f1_score, matthews_corrcoef
|
| 18 |
+
|
| 19 |
+
import datasets
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
_CITATION = """\
|
| 23 |
+
@inproceedings{wang2019glue,
|
| 24 |
+
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
|
| 25 |
+
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
|
| 26 |
+
note={In the Proceedings of ICLR.},
|
| 27 |
+
year={2019}
|
| 28 |
+
}
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
_DESCRIPTION = """\
|
| 32 |
+
GLUE, the General Language Understanding Evaluation benchmark
|
| 33 |
+
(https://gluebenchmark.com/) is a collection of resources for training,
|
| 34 |
+
evaluating, and analyzing natural language understanding systems.
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
_KWARGS_DESCRIPTION = """
|
| 38 |
+
Compute GLUE evaluation metric associated to each GLUE dataset.
|
| 39 |
+
Args:
|
| 40 |
+
predictions: list of predictions to score.
|
| 41 |
+
Each translation should be tokenized into a list of tokens.
|
| 42 |
+
references: list of lists of references for each translation.
|
| 43 |
+
Each reference should be tokenized into a list of tokens.
|
| 44 |
+
Returns: depending on the GLUE subset, one or several of:
|
| 45 |
+
"accuracy": Accuracy
|
| 46 |
+
"f1": F1 score
|
| 47 |
+
"pearson": Pearson Correlation
|
| 48 |
+
"spearmanr": Spearman Correlation
|
| 49 |
+
"matthews_correlation": Matthew Correlation
|
| 50 |
+
Examples:
|
| 51 |
+
|
| 52 |
+
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
|
| 53 |
+
>>> references = [0, 1]
|
| 54 |
+
>>> predictions = [0, 1]
|
| 55 |
+
>>> results = glue_metric.compute(predictions=predictions, references=references)
|
| 56 |
+
>>> print(results)
|
| 57 |
+
{'accuracy': 1.0}
|
| 58 |
+
|
| 59 |
+
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
|
| 60 |
+
>>> references = [0, 1]
|
| 61 |
+
>>> predictions = [0, 1]
|
| 62 |
+
>>> results = glue_metric.compute(predictions=predictions, references=references)
|
| 63 |
+
>>> print(results)
|
| 64 |
+
{'accuracy': 1.0, 'f1': 1.0}
|
| 65 |
+
|
| 66 |
+
>>> glue_metric = datasets.load_metric('glue', 'stsb')
|
| 67 |
+
>>> references = [0., 1., 2., 3., 4., 5.]
|
| 68 |
+
>>> predictions = [0., 1., 2., 3., 4., 5.]
|
| 69 |
+
>>> results = glue_metric.compute(predictions=predictions, references=references)
|
| 70 |
+
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
|
| 71 |
+
{'pearson': 1.0, 'spearmanr': 1.0}
|
| 72 |
+
|
| 73 |
+
>>> glue_metric = datasets.load_metric('glue', 'cola')
|
| 74 |
+
>>> references = [0, 1]
|
| 75 |
+
>>> predictions = [0, 1]
|
| 76 |
+
>>> results = glue_metric.compute(predictions=predictions, references=references)
|
| 77 |
+
>>> print(results)
|
| 78 |
+
{'matthews_correlation': 1.0}
|
| 79 |
+
"""
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def simple_accuracy(preds, labels):
    """Return the fraction of positions where `preds` equals `labels`.

    Expects array-likes supporting elementwise `==` and `.mean()`
    (e.g. NumPy arrays, as produced by the metric's "numpy" format).
    """
    matches = preds == labels
    return float(matches.mean())
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def acc_and_f1(preds, labels):
    """Return both accuracy and binary F1 — used for the MRPC and QQP subsets."""
    return {
        "accuracy": simple_accuracy(preds, labels),
        "f1": float(f1_score(y_true=labels, y_pred=preds)),
    }
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def pearson_and_spearman(preds, labels):
    """Return Pearson and Spearman correlation coefficients — used for STS-B."""
    return {
        "pearson": float(pearsonr(preds, labels)[0]),
        "spearmanr": float(spearmanr(preds, labels)[0]),
    }
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    # Single source of truth for the accepted configuration names.  The
    # original code duplicated this list (and its error message) verbatim in
    # both _info and _compute, which is a drift hazard.
    _ALLOWED_CONFIGS = (
        "sst2",
        "mnli",
        "mnli_mismatched",
        "mnli_matched",
        "cola",
        "stsb",
        "mrpc",
        "qqp",
        "qnli",
        "rte",
        "wnli",
        "hans",
    )

    @classmethod
    def _config_error(cls):
        """Build the KeyError for an unknown config, derived from the one list.

        The message text is identical to the original hard-coded string.
        """
        quoted = ", ".join(f'"{name}"' for name in cls._ALLOWED_CONFIGS)
        return KeyError(f"You should supply a configuration name selected in [{quoted}]")

    def _info(self):
        # Fail fast at load time on an unknown subset name.
        if self.config_name not in self._ALLOWED_CONFIGS:
            raise self._config_error()
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # STS-B is a regression task; every other subset uses
                    # integer class labels.
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        """Dispatch to the metric(s) associated with the configured subset.

        Returns a dict with one or more of: "accuracy", "f1", "pearson",
        "spearmanr", "matthews_correlation".
        """
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        if self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        if self.config_name in ("mrpc", "qqp"):
            return acc_and_f1(predictions, references)
        if self.config_name in self._ALLOWED_CONFIGS:
            # Remaining subsets (sst2, the mnli variants, qnli, rte, wnli,
            # hans) all report plain accuracy.
            return {"accuracy": simple_accuracy(predictions, references)}
        raise self._config_error()
|