Rodrigo Ferreira Rodrigues committed on
Commit 5ce0b20 · 1 Parent(s): 8bfb453

add module default template
Files changed (5)
  1. README.md +44 -6
  2. app.py +6 -0
  3. place_gen_evaluate.py +127 -0
  4. requirements.txt +7 -0
  5. tests.py +17 -0
README.md CHANGED
@@ -1,12 +1,50 @@
  ---
- title: Place Gen Evaluate
- emoji: 😻
- colorFrom: green
- colorTo: indigo
  sdk: gradio
- sdk_version: 6.5.1
  app_file: app.py
  pinned: false
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

  ---
+ title: Place_gen_evaluate
+ datasets:
+ - GeoBenchmark
+ tags:
+ - evaluate
+ - metric
+ description: "TODO: add a description here"
  sdk: gradio
+ sdk_version: 3.19.1
  app_file: app.py
  pinned: false
  ---

+ # Metric Card for Place_gen_evaluate
+
+ ***Module Card Instructions:*** *Fill out the following subsections. Feel free to take a look at existing metric cards if you'd like examples.*
+
+ ## Metric Description
+ *Give a brief overview of this metric, including what task(s) it is usually used for, if any.*
+
+ ## How to Use
+ *Give a general statement of how to use the metric.*
+
+ *Provide the simplest possible example for using the metric.*
+
+ ### Inputs
+ *List all input arguments in the format below.*
+ - **input_field** *(type): Definition of input, with explanation if necessary. State any default value(s).*
+
+ ### Output Values
+
+ *Explain what this metric outputs and provide an example of what the metric output looks like. Modules should return a dictionary with one or multiple key-value pairs, e.g. {"bleu" : 6.02}*
+
+ *State the range of possible values that the metric's output can take, as well as what in that range is considered good. For example: "This metric can take on any value between 0 and 100, inclusive. Higher scores are better."*
+
+ #### Values from Popular Papers
+ *Give examples, preferably with links to leaderboards or publications, of papers that have reported this metric, along with the values they have reported.*
+
+ ### Examples
+ *Give code examples of the metric being used. Try to include examples that clear up any potential ambiguity left from the metric description above. If possible, provide a range of examples that show both typical and atypical results, as well as examples where a variety of input parameters are passed.*
+
+ ## Limitations and Bias
+ *Note any known limitations or biases that the metric has, with links and references if possible.*
+
+ ## Citation
+ *Cite the source where this metric was introduced.*
+
+ ## Further References
+ *Add any useful further references.*
app.py ADDED
@@ -0,0 +1,6 @@
+ import evaluate
+ from evaluate.utils import launch_gradio_widget
+
+
+ module = evaluate.load("rfr2003/place_gen_evaluate")
+ launch_gradio_widget(module)
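
app.py only wraps the module in the stock Gradio widget. For reference, here is a minimal sketch of calling the loaded module directly, assuming the `generations`/`golds` feature names defined in place_gen_evaluate.py below; the example inputs are illustrative, and the exact output keys depend on the loaded `bleu` and `bertscore` backends. Note that, as committed, `_compute` also depends on a `calculate_pre_rec` helper that is not defined in the module (see the sketch after that file).

```python
import evaluate

# Hypothetical direct usage of the module (not part of this commit).
# Feature names follow the _info() definition in place_gen_evaluate.py.
module = evaluate.load("rfr2003/place_gen_evaluate")
results = module.compute(
    generations=["['paris', 'lyon']"],       # raw model output, often a stringified list
    golds=[["paris", "lyon", "marseille"]],  # acceptable gold answers for that example
)
print(results)  # e.g. bert_score_*, bleu-1, macro-precision, macro-rappel, macro-mean, ...
```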
place_gen_evaluate.py ADDED
@@ -0,0 +1,127 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """TODO: Add a description here."""
+
+ import evaluate
+ import datasets
+ import numpy as np
+ import ast
+ import re
+
+ # TODO: Add BibTeX citation
+ _CITATION = """\
+ @InProceedings{huggingface:module,
+ title = {A great new module},
+ authors={huggingface, Inc.},
+ year={2020}
+ }
+ """
+
+ # TODO: Add description of the module here
+ _DESCRIPTION = """\
+ This new module is designed to solve this great ML task and is crafted with a lot of care.
+ """
+
+
+ # TODO: Add description of the arguments of the module here
+ _KWARGS_DESCRIPTION = """
+ Calculates how good predictions are, given some references, using certain scores
+ Args:
+     predictions: list of predictions to score. Each prediction
+         should be a string with tokens separated by spaces.
+     references: list of references, one for each prediction. Each
+         reference should be a string with tokens separated by spaces.
+ Returns:
+     accuracy: description of the first score,
+     another_score: description of the second score,
+ Examples:
+     Examples should be written in doctest format, and should illustrate how
+     to use the function.
+
+     >>> my_new_module = evaluate.load("my_new_module")
+     >>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
+     >>> print(results)
+     {'accuracy': 1.0}
+ """
+
+
+ @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+ class Place_gen_evaluate(evaluate.Metric):
+     """TODO: Short description of my evaluation module."""
+
+     def _info(self):
+         # TODO: Specifies the evaluate.EvaluationModuleInfo object
+         return evaluate.MetricInfo(
+             # This is the description that will appear on the modules page.
+             module_type="metric",
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             inputs_description=_KWARGS_DESCRIPTION,
+             # This defines the format of each prediction and reference
+             features=datasets.Features({
+                 'generations': datasets.Value('string'),
+                 'golds': datasets.Sequence(datasets.Value('string')),
+             }),
+         )
+
+     def _download_and_prepare(self, dl_manager):
+         """Optional: download external resources useful to compute the scores"""
+         # TODO: Download external resources if needed
+         self.bleu = evaluate.load('bleu')
+         self.bert_score = evaluate.load('bertscore')
+
+     def _compute(self, generations, golds):
+         '''Calculate BLEU-1, BERTScore, and precision/recall between model generations and gold answers.
+         We expect a set of generated answers and want to find it among a set of gold answers.'''
+         assert len(generations) == len(golds)
+         assert isinstance(golds, list)
+
+         predictions, references = [], []
+         precisions, recalls, means_pre_rec = [], [], []
+
+         for gen, gold in zip(generations, golds):
+             gen = gen.strip().lower()
+             # each gold must be a list of str
+             for i in range(len(gold)):
+                 gold[i] = gold[i].lower().strip()
+
+             try:
+                 f_ans = ast.literal_eval(gen)
+             except (ValueError, SyntaxError):
+                 subs = ['[', ']', '(', ')', ' ']
+                 pattern = r'(' + '|'.join(map(re.escape, subs)) + r')'
+                 f_ans = re.sub(pattern, "", gen).split(',')
+
+             f_ans = list(set([a.lower().strip() for a in f_ans]))  # drop duplicate values
+             f_gold = list(set(gold))
+
+             precision, recall = calculate_pre_rec(f_ans, f_gold, Levenshtein.distance)
+
+             precisions.append(precision)
+             recalls.append(recall)
+             means_pre_rec.append((precision + recall) / 2)
+
+             predictions.append(gen)
+             references.append(gold)
+
+         metrics = {f"bert_score_{k}": np.mean(v).item() for k, v in self.bert_score.compute(predictions=predictions, references=references, lang="en").items() if k in ['recall', 'precision', 'f1']}
+         metrics.update({
+             'bleu-1': self.bleu.compute(predictions=predictions, references=references, max_order=1)['bleu'],
+             'macro-precision': np.mean(precisions).item(),
+             'macro-rappel': np.mean(recalls).item(),
+             'macro-mean': np.mean(means_pre_rec).item(),
+             'median absolute error': np.median(means_pre_rec).item()  # median of the per-example means
+         })
+
+         return metrics
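
As committed, `_compute` calls `Levenshtein.distance` without importing the `Levenshtein` package (it is listed in requirements.txt) and relies on a `calculate_pre_rec` helper that is not defined anywhere in the module. A minimal, hypothetical sketch of the missing pieces is below, assuming the helper counts an answer as matched when its edit distance to some gold answer is at most a small threshold; this matching rule and threshold are assumptions, not behaviour recovered from this commit.

```python
import Levenshtein  # provides Levenshtein.distance; already listed in requirements.txt


def calculate_pre_rec(answers, golds, distance, threshold=2):
    """Hypothetical set-level precision/recall between generated and gold answers.

    Assumption (not from this commit): an item counts as matched when its edit
    distance to some item on the other side is at most `threshold`.
    """
    if not answers or not golds:
        return 0.0, 0.0
    matched_answers = sum(1 for a in answers if any(distance(a, g) <= threshold for g in golds))
    matched_golds = sum(1 for g in golds if any(distance(a, g) <= threshold for a in answers))
    precision = matched_answers / len(answers)
    recall = matched_golds / len(golds)
    return precision, recall
```

With these two additions the module imports and runs end to end; the exact helper used by the author may differ.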
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ git+https://github.com/huggingface/evaluate@main
+ transformers
+ torch
+ datasets
+ numpy
+ bert_score
+ levenshtein
tests.py ADDED
@@ -0,0 +1,17 @@
+ test_cases = [
+     {
+         "predictions": [0, 0],
+         "references": [1, 1],
+         "result": {"metric_score": 0}
+     },
+     {
+         "predictions": [1, 1],
+         "references": [1, 1],
+         "result": {"metric_score": 1}
+     },
+     {
+         "predictions": [1, 0],
+         "references": [1, 1],
+         "result": {"metric_score": 0.5}
+     }
+ ]
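
Note that tests.py keeps the template's default cases, which use a `predictions`/`references` interface and a `metric_score` output; neither matches this module's `generations`/`golds` features or its output keys. A test case shaped like the module's actual inputs might look like the sketch below (the values are illustrative only, not results produced by the module):

```python
example_case = {
    "generations": ["paris, lyon"],   # one model generation per example
    "golds": [["paris", "lyon"]],     # list of acceptable answers per example
}
```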