Rodrigo Ferreira Rodrigues committed on
Commit cee9920 · 1 Parent(s): 17ae225

add module default template
Files changed (5)
  1. README.md +44 -6
  2. app.py +6 -0
  3. regression_evaluate.py +130 -0
  4. requirements.txt +2 -0
  5. tests.py +7 -0
README.md CHANGED
@@ -1,12 +1,50 @@
 ---
-title: Regression Evaluate
-emoji: 📉
-colorFrom: green
-colorTo: indigo
+title: regression_evaluate
+datasets:
+- GeoBenchmark
+tags:
+- evaluate
+- metric
+description: "TODO: add a description here"
 sdk: gradio
-sdk_version: 6.5.1
+sdk_version: 3.19.1
 app_file: app.py
 pinned: false
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# Metric Card for regression_evaluate
+
+***Module Card Instructions:*** *Fill out the following subsections. Feel free to take a look at existing metric cards if you'd like examples.*
+
+## Metric Description
+*Give a brief overview of this metric, including what task(s) it is usually used for, if any.*
+
+## How to Use
+*Give a general statement of how to use the metric.*
+
+*Provide the simplest possible example of using the metric.*
+
+### Inputs
+*List all input arguments in the format below.*
+- **input_field** *(type): Definition of input, with explanation if necessary. State any default value(s).*
+
+### Output Values
+
+*Explain what this metric outputs and provide an example of what the metric output looks like. Modules should return a dictionary with one or multiple key-value pairs, e.g. {"bleu": 6.02}.*
+
+*State the range of possible values that the metric's output can take, as well as what in that range is considered good. For example: "This metric can take on any value between 0 and 100, inclusive. Higher scores are better."*
+
+#### Values from Popular Papers
+*Give examples, preferably with links to leaderboards or publications, of papers that have reported this metric, along with the values they have reported.*
+
+### Examples
+*Give code examples of the metric being used. Try to include examples that clear up any potential ambiguity left from the metric description above. If possible, provide a range of examples that show both typical and atypical results, as well as examples where a variety of input parameters are passed.*
+
+## Limitations and Bias
+*Note any known limitations or biases that the metric has, with links and references if possible.*
+
+## Citation
+*Cite the source where this metric was introduced.*
+
+## Further References
+*Add any useful further references.*
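
For the Examples subsection above, a minimal usage sketch could look like the following. It assumes the module is published under `rfr2003/regression_evaluate` (the ID used in app.py), and the expected output follows the docstring example in regression_evaluate.py:

```python
import evaluate

# Load the module from the Hub; the ID is taken from app.py and assumes the Space is published.
metric = evaluate.load("rfr2003/regression_evaluate")

# One generated string per example, and one list of gold floats per example.
results = metric.compute(
    generations=["[150, 0]"],
    golds=[[183, 177, 146, 85, 70, 78, 55, 17, 0, -1, -1]],
)
print(results)
# {'precision': 4.0, 'recall': 344.0, 'macro-mean': 174.0, 'median macro-mean': 174.0}
```

Both scores are distances, so lower is better; 0.0 means every number extracted from the generation coincides with a gold value and every gold value is matched exactly.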
app.py ADDED
@@ -0,0 +1,6 @@
+import evaluate
+from evaluate.utils import launch_gradio_widget
+
+
+module = evaluate.load("rfr2003/regression_evaluate")
+launch_gradio_widget(module)
regression_evaluate.py ADDED
@@ -0,0 +1,130 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""TODO: Add a description here."""
+
+import re
+from statistics import median
+
+import datasets
+import evaluate
+import numpy as np
+
+# TODO: Add BibTeX citation
+_CITATION = """\
+@InProceedings{huggingface:module,
+title = {A great new module},
+authors={huggingface, Inc.},
+year={2020}
+}
+"""
+
+# TODO: Add description of the module here
+_DESCRIPTION = """\
+This metric aims to evaluate regression tasks done by LMs.
+"""
+
+
+# TODO: Add description of the arguments of the module here
+_KWARGS_DESCRIPTION = """
+Calculates distance-based precision and recall between the numbers extracted
+from a generation and a list of gold reference values.
+Args:
+    generations: list of predictions to score. Each prediction
+        should be a string generated by a LM.
+    golds: list of references, one per prediction. Each
+        reference should be a list of floats.
+Returns:
+    precision: mean over examples of the summed absolute distance from each
+        generated value to its nearest gold value (lower is better),
+    recall: mean over examples of the summed absolute distance from each
+        gold value to its nearest generated value (lower is better),
+    macro-mean: mean over examples of (precision + recall) / 2,
+    median macro-mean: median over examples of (precision + recall) / 2.
+Examples:
+    Here is an example of how to use the metric:
+
+    >>> metric = evaluate.load("rfr2003/regression_evaluate")
+    >>> results = metric.compute(generations=['[150, 0]'], golds=[[183, 177, 146, 85, 70, 78, 55, 17, 0, -1, -1]])
+    >>> print(results)
+    {'precision': 4.0, 'recall': 344.0, 'macro-mean': 174.0, 'median macro-mean': 174.0}
+"""
+
+
+@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+class regression_evaluate(evaluate.Metric):
+    """TODO: Short description of my evaluation module."""
+
+    def _info(self):
+        # TODO: Specifies the evaluate.EvaluationModuleInfo object
+        return evaluate.MetricInfo(
+            # This is the description that will appear on the modules page.
+            module_type="metric",
+            description=_DESCRIPTION,
+            citation=_CITATION,
+            inputs_description=_KWARGS_DESCRIPTION,
+            # This defines the format of each prediction and reference
+            features=datasets.Features({
+                'generations': datasets.Value('string'),
+                'golds': datasets.Sequence(datasets.Value('float32')),
+            }),
+        )
+
+    def _download_and_prepare(self, dl_manager):
+        """Optional: download external resources useful to compute the scores"""
+        pass
+
+    def _calculate_pre_rec(self, gens, golds, d):
+        # Pairwise distances between every gold value and every generated value,
+        # collected as a matrix of shape (len(golds), len(gens)).
+        dists = []
+
+        for gold in golds:
+            g_dist = []
+            for gen in gens:
+                g_dist.append(d(gold, gen))
+            if len(g_dist) == 0:
+                g_dist.append(100)  # penalty if the model doesn't generate anything
+            dists.append(g_dist)
+
+        dists = np.array(dists)
+        # precision: each generated value is matched to its nearest gold value
+        precision = np.min(dists, axis=0).sum()
+        # recall: each gold value is matched to its nearest generated value
+        recall = np.min(dists, axis=1).sum()
+
+        return precision, recall
+
+    def _compute(self, generations, golds):
+        assert len(generations) == len(golds)
+        assert isinstance(golds, list)
+
+        precisions, recalls, means_pre_rec = [], [], []
+
+        for gen, gold in zip(generations, golds):
+            # deduplicate the gold values
+            f_gold = list(set([float(g) for g in gold]))
+
+            # extract the numbers from the generated string
+            # (note: the pattern does not capture minus signs)
+            f_ans = re.findall(r'\d+(?:\.\d+)?', gen)
+
+            f_ans = list(set([float(a) for a in f_ans]))  # get rid of duplicate values
+
+            precision, recall = self._calculate_pre_rec(f_ans, f_gold, lambda x, y: abs(x - y))
+
+            precisions.append(precision)
+            recalls.append(recall)
+            means_pre_rec.append((precision + recall) / 2)
+
+        return {
+            'precision': np.mean(precisions).item(),
+            'recall': np.mean(recalls).item(),
+            'macro-mean': np.mean(means_pre_rec).item(),
+            'median macro-mean': median(means_pre_rec),
+        }
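
The scoring in `_calculate_pre_rec` is a chamfer-style matching between two sets of numbers: precision sums, for each value extracted from the generation, the absolute distance to its nearest gold value, while recall sums, for each gold value, the distance to its nearest extracted value. A self-contained sketch of the per-example computation (the function name is illustrative, not part of the module):

```python
import re

import numpy as np


def distance_precision_recall(generation, gold_values, penalty=100.0):
    """Illustrative standalone version of the module's per-example score."""
    gens = sorted({float(a) for a in re.findall(r"\d+(?:\.\d+)?", generation)})
    golds = sorted({float(g) for g in gold_values})  # duplicates dropped, as in the module
    if not gens:
        # mirror the module's penalty when the generation contains no number
        return penalty, penalty * len(golds)
    # pairwise absolute distances, shape (len(golds), len(gens))
    dists = np.abs(np.subtract.outer(golds, gens))
    precision = dists.min(axis=0).sum()  # nearest gold for each generated value
    recall = dists.min(axis=1).sum()  # nearest generated value for each gold
    return float(precision), float(recall)


print(distance_precision_recall("[150, 0]", [183, 177, 146, 85, 70, 78, 55, 17, 0, -1, -1]))
# (4.0, 344.0)
```

Note that the extraction pattern `\d+(?:\.\d+)?` does not capture minus signs, so negative gold values (such as the -1 above) can never be matched exactly; this is worth recording under Limitations and Bias in the metric card.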
requirements.txt ADDED
@@ -0,0 +1,2 @@
+git+https://github.com/huggingface/evaluate@main
+numpy
tests.py ADDED
@@ -0,0 +1,7 @@
+test_cases = [
+    {
+        'generations': ['[150, 0]'],
+        'golds': [[183, 177, 146, 85, 70, 78, 55, 17, 0, -1, -1]],
+        'result': {'precision': 4.0, 'recall': 344.0, 'macro-mean': 174.0, 'median macro-mean': 174.0}
+    }
+]
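
A possible runner, if appended to tests.py (it assumes evaluate can resolve the local script path `regression_evaluate.py` when run from the repository root):

```python
import evaluate

# Load from the local script; "rfr2003/regression_evaluate" would load from the Hub instead.
module = evaluate.load("regression_evaluate.py")

for case in test_cases:
    result = module.compute(generations=case["generations"], golds=case["golds"])
    # exact comparison is fine here: all distances in the case above are whole numbers
    assert result == case["result"], f"expected {case['result']}, got {result}"
print("all test cases passed")
```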