Update README
Browse files- README.md +9 -0
- code_bleu.py +2 -1
README.md
CHANGED
|
@@ -10,3 +10,12 @@ pinned: false
|
|
| 10 |
---
|
| 11 |
|
| 12 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
---
|
| 11 |
|
| 12 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
| 13 |
+
|
| 14 |
+
# Create a new evaluation module in a Hugging Face Space
|
| 15 |
+
Use the `evaluate-cli` to create a new module:
|
| 16 |
+
|
| 17 |
+
```
|
| 18 |
+
evaluate-cli create "Metric name" --module_type "metric"
|
| 19 |
+
```
|
| 20 |
+
This will create a new Space on the Hugging Face Hub. You can then clone the Space locally and add the code for the computation of the metric.<br/>
|
| 21 |
+
More information: https://huggingface.co/docs/evaluate/main/en/creating_and_sharing#requirements
|
code_bleu.py
CHANGED
|
@@ -79,5 +79,6 @@ class CodeBleu(evaluate.Metric):
|
|
| 79 |
return result["codebleu"]
|
| 80 |
|
| 81 |
def _compute(self, references, predictions):
|
| 82 |
-
average_codebleu_score = sum([
|
|
|
|
| 83 |
return {"codebleu_score": average_codebleu_score}
|
|
|
|
| 79 |
return result["codebleu"]
|
| 80 |
|
| 81 |
def _compute(self, references, predictions):
|
| 82 |
+
average_codebleu_score = sum([push
|
| 83 |
+
(r, p) for r, p in zip(references, predictions)])/len(references)
|
| 84 |
return {"codebleu_score": average_codebleu_score}
|