Rodrigo Ferreira Rodrigues committed
Commit 5f14629 · 1 Parent(s): 5f3a087

add metric functions and tests

Files changed (5)
  1. README.md +44 -6
  2. app.py +6 -0
  3. coord_eval.py +142 -0
  4. requirements.txt +1 -0
  5. tests.py +14 -0
README.md CHANGED
@@ -1,12 +1,50 @@
  ---
- title: Coord Eval
- emoji: 📚
- colorFrom: gray
- colorTo: red
+ title: Coord_eval
+ datasets:
+ - GeoBenchmark
+ tags:
+ - evaluate
+ - metric
+ description: "TODO: add a description here"
  sdk: gradio
- sdk_version: 6.5.1
+ sdk_version: 3.19.1
  app_file: app.py
  pinned: false
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Metric Card for Coord_eval
+
+ ***Module Card Instructions:*** *Fill out the following subsections. Feel free to take a look at existing metric cards if you'd like examples.*
+
+ ## Metric Description
+ *Give a brief overview of this metric, including what task(s) it is usually used for, if any.*
+
+ ## How to Use
+ *Give a general statement of how to use the metric.*
+
+ *Provide the simplest possible example of using the metric.*
+
+ ### Inputs
+ *List all input arguments in the format below.*
+ - **input_field** *(type): Definition of input, with explanation if necessary. State any default value(s).*
+
+ ### Output Values
+
+ *Explain what this metric outputs and provide an example of what the metric output looks like. Modules should return a dictionary with one or multiple key-value pairs, e.g. {"bleu": 6.02}.*
+
+ *State the range of possible values that the metric's output can take, as well as what in that range is considered good. For example: "This metric can take on any value between 0 and 100, inclusive. Higher scores are better."*
+
+ #### Values from Popular Papers
+ *Give examples, preferably with links to leaderboards or publications, of papers that have reported this metric, along with the values they have reported.*
+
+ ### Examples
+ *Give code examples of the metric being used. Try to include examples that clear up any potential ambiguity left from the metric description above. If possible, provide a range of examples that show both typical and atypical results, as well as examples where a variety of input parameters are passed.*
+
+ ## Limitations and Bias
+ *Note any known limitations or biases that the metric has, with links and references if possible.*
+
+ ## Citation
+ *Cite the source where this metric was introduced.*
+
+ ## Further References
+ *Add any useful further references.*
app.py ADDED
@@ -0,0 +1,6 @@
+ import evaluate
+ from evaluate.utils import launch_gradio_widget
+
+
+ module = evaluate.load("rfr2003/coord_eval")
+ launch_gradio_widget(module)
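app.py only wraps the metric in the stock evaluate Gradio widget. As a quick sanity check outside the Space UI, the module can also be loaded and called directly; a minimal sketch, assuming the published rfr2003/coord_eval module matches the fixed coord_eval.py below:

import evaluate

# Load the metric from the Hub (same id app.py uses) and score two generations.
module = evaluate.load("rfr2003/coord_eval")
results = module.compute(
    generations=["(12.7, 67.8)", "(16.7, 89.6)"],  # LM output strings
    golds=[[12.7, 67.8], [10.9, 80.6]],            # [lat, lon] ground truths
    d_range=20,                                    # threshold in km
)
print(results)  # expected: {'coord_accuracy': 0.5}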
coord_eval.py ADDED
@@ -0,0 +1,142 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """TODO: Add a description here."""
+
+ import ast
+ import math
+ import re
+
+ import datasets
+ import evaluate
+
+
+ # TODO: Add BibTeX citation
+ _CITATION = """\
+ @InProceedings{huggingface:module,
+ title = {A great new module},
+ authors={huggingface, Inc.},
+ year={2020}
+ }
+ """
+
+ # TODO: Add description of the module here
+ _DESCRIPTION = """\
+ This metric computes a coordinate accuracy between coordinates generated by an LM and gold ones. A predicted pair of coordinates is considered correct if its haversine distance to the gold coordinates is less than a threshold d.
+ """
+
+
+ # TODO: Add description of the arguments of the module here
+ _KWARGS_DESCRIPTION = """
+ Calculates how good the predictions are, given some references.
+ Args:
+     generations: list of predictions to score. Each prediction
+         should be a string generated by an LM.
+     golds: list of references, one per prediction. Each
+         reference should be a list of two floats corresponding to the latitude and longitude of the ground truth (e.g. [12.8, 76.9]).
+     d_range: distance threshold in km; a prediction counts as correct if it lies within d_range km of the gold coordinates. Defaults to 20.
+ Returns:
+     coord_accuracy: fraction of predictions that lie within d_range km of their gold coordinates.
+ Examples:
+     >>> my_new_module = evaluate.load("rfr2003/coord_eval")
+     >>> results = my_new_module.compute(generations=["(12.7, 67.8)", "(16.7, 89.6)"], golds=[[12.7, 67.8], [10.9, 80.6]], d_range=20)
+     >>> print(results)
+     {'coord_accuracy': 0.5}
+ """
+
+ # TODO: Define external resources urls if needed
+ BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
+
+
+ @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+ class Coord_eval(evaluate.Metric):
+     """TODO: Short description of my evaluation module."""
+
+     def _info(self):
+         # TODO: Specifies the evaluate.EvaluationModuleInfo object
+         return evaluate.MetricInfo(
+             # This is the description that will appear on the modules page.
+             module_type="metric",
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             inputs_description=_KWARGS_DESCRIPTION,
+             # This defines the format of each prediction and reference
+             features=datasets.Features({
+                 'generations': datasets.Value('string'),
+                 'golds': datasets.Sequence(datasets.Value('float32')),
+             }),
+             # Homepage of the module for documentation
+             homepage="http://module.homepage",
+             # Additional links to the codebase or references
+             codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
+             reference_urls=["http://path.to.reference.url/new_module"]
+         )
+
+     def _download_and_prepare(self, dl_manager):
+         """Optional: download external resources useful to compute the scores"""
+         # TODO: Download external resources if needed
+         pass
+
+     def _haversine_distance(self, coord1, coord2):
+         """Great-circle distance in km between two (lat, lon) pairs."""
+         lat1, lon1 = coord1
+         lat2, lon2 = coord2
+
+         # Convert degrees to radians
+         lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2])
+
+         dlat = lat2 - lat1
+         dlon = lon2 - lon1
+
+         # Haversine formula (R is the mean Earth radius in km)
+         a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
+         c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
+         R = 6371.0
+
+         return R * c
+
+     def _accuracy_coord(self, gen, gold, d_range=20):
+         # 1 if gen is within d_range km of gold, 0 otherwise
+         d = self._haversine_distance(gold, gen)
+         return int(d <= d_range)
+
+     def _compute(self, generations, golds, d_range=20):
+         assert len(generations) == len(golds)
+         assert isinstance(golds, list)
+
+         correct, total = 0, 0
+
+         for gen, gold in zip(generations, golds):
+             # Each gold must be in the format [lat, lon]
+             assert len(gold) == 2
+             f_gold = (float(gold[0]), float(gold[1]))
+
+             try:
+                 # Fast path: the generation is already a literal like "(12.7, 67.8)"
+                 f_ans = ast.literal_eval(gen)
+                 correct += self._accuracy_coord(f_ans, f_gold, d_range)
+             except (ValueError, SyntaxError, TypeError):
+                 # Fallback: extract the first "(lat, lon)"-like span from free-form text
+                 pattern = r'[\(\[]\s*(\d+((,|\s|\.)*\d+)*)\s*[, ]\s*(\d+((,|\s|\.)*\d+)*)\s*[\)\]]'
+                 matches = re.findall(pattern, gen)
+
+                 if matches:
+                     match = matches[0]
+                     f_ans = (float(match[0].replace(',', '.').replace(' ', '')), float(match[3].replace(',', '.').replace(' ', '')))
+                     correct += self._accuracy_coord(f_ans, f_gold, d_range)
+
+             total += 1
+
+         metrics = {'coord_accuracy': correct / total}
+
+         return metrics
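For intuition on the 0.5 in the docstring example: the first generation parses to exactly its gold point (0 km away), while the second, (16.7, 89.6), is roughly 1166 km from its gold (10.9, 80.6), far over the default 20 km threshold. A self-contained sketch of the same haversine computation, outside the metric class:

import math

def haversine_km(coord1, coord2):
    # Great-circle distance in km between two (lat, lon) pairs; R = 6371 km.
    lat1, lon1, lat2, lon2 = map(math.radians, [*coord1, *coord2])
    a = (math.sin((lat2 - lat1) / 2) ** 2
         + math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2) ** 2)
    return 6371.0 * 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))

print(haversine_km((12.7, 67.8), (12.7, 67.8)))  # 0.0 -> within 20 km, correct
print(haversine_km((16.7, 89.6), (10.9, 80.6)))  # ~1166 -> over 20 km, incorrect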
requirements.txt ADDED
@@ -0,0 +1 @@
+ git+https://github.com/huggingface/evaluate@main
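This pins evaluate to its development branch rather than a released version; for local experimentation, pip install -r requirements.txt (or a released evaluate version) should suffice.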
tests.py ADDED
@@ -0,0 +1,14 @@
+ test_cases = [
+     {
+         "generations": ["(12.7, 67.8)", "(16.7, 89.6)"],
+         "golds": [[12.7, 67.8], [10.9, 80.6]],
+         "d_range": 20,
+         "result": {"coord_accuracy": 0.5}
+     },
+     {
+         "generations": ["(12.7, 67.8)", "(16.7, 89.6)"],
+         "golds": [[12.7, 67.8], [10.9, 80.6]],
+         "d_range": 1000000,
+         "result": {"coord_accuracy": 1.0}
+     },
+ ]
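tests.py only declares fixtures; it never runs them. A minimal runner sketch, assuming it sits next to coord_eval.py and that evaluate can load the local script path:

import math

import evaluate
from tests import test_cases

# Load the local metric script and check each fixture against its expected result.
metric = evaluate.load("coord_eval.py")
for case in test_cases:
    out = metric.compute(
        generations=case["generations"],
        golds=case["golds"],
        d_range=case["d_range"],
    )
    expected = case["result"]["coord_accuracy"]
    assert math.isclose(out["coord_accuracy"], expected), (out, expected)
print(f"{len(test_cases)} test cases passed")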