Elad committed on
Commit ·
646a69c
1
Parent(s): ced174a
remove unused metrics
Browse files- metrics/bart_score.py +0 -53
- metrics/bleu.py +0 -47
metrics/bart_score.py
DELETED
|
@@ -1,53 +0,0 @@
|
|
| 1 |
-
import os
|
| 2 |
-
import numpy as np
|
| 3 |
-
from BARTScore.bart_score import BARTScorer
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
def get_scorers():
    """Build the three BARTScore scorers used for evaluation.

    Returns:
        dict: mapping of scorer name ("vanilla", "cnn", "para") to an
        initialized ``BARTScorer`` running on ``cuda:0``.

    Raises:
        FileNotFoundError: if the fine-tuned ParaBank checkpoint
        ``BARTScore/bart.pth`` has not been downloaded.
    """
    # Raise a real exception instead of `assert`: asserts are stripped when
    # Python runs with -O, which would turn this clear setup error into an
    # obscure failure inside BARTScorer.load() below.
    if not os.path.isfile(os.path.join("BARTScore", "bart.pth")):
        raise FileNotFoundError(
            "You must download `bart.pth` to use BARTScore.\nUse `gdown --id 1_7JfF7KOInb7ZrxKHIigTMR4ChVET01m --output bart.pth`"
        )

    scorers = {}

    scorers["vanilla"] = BARTScorer(device="cuda:0", checkpoint="facebook/bart-large")

    scorers["cnn"] = BARTScorer(device="cuda:0", checkpoint="facebook/bart-large-cnn")

    # for the parabank model, first init a bart model, then load the local para model from BARTScore/bart.pth
    # see the documentation from https://github.com/neulab/BARTScore for reference
    scorers["para"] = BARTScorer(device="cuda:0", checkpoint="facebook/bart-large-cnn")
    scorers["para"].load(path="BARTScore/bart.pth")

    return scorers
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
def compute_bart_score_for_scorer(predictions, references, scorer_name, scorer):
    """Score predictions against references with a single BARTScorer.

    Computes the recall-direction score ``score(prediction -> reference)``
    and a self-score baseline ``score(reference -> reference)``, then derives
    three variants from the pair: a ratio, a difference, and the
    exponentiated difference.

    Args:
        predictions: list of predicted strings.
        references: list of reference strings, parallel to ``predictions``.
        scorer_name: prefix used for the metric keys.
        scorer: object exposing ``score(sources, targets, batch_size)``.

    Returns:
        A list with one dict per prediction, keyed by
        ``{scorer_name}_recall`` / ``_normalized`` / ``_diffs`` / ``_expdiffs``.
    """
    recalls = np.array(scorer.score(predictions, references, batch_size=4))
    # Baseline: how well each reference "explains" itself under this scorer.
    baselines = np.array(scorer.score(references, references, batch_size=4))

    normalized = baselines / recalls
    diffs = recalls - baselines
    expdiffs = np.exp(diffs)

    results = []
    for i in range(len(predictions)):
        results.append(
            {
                f"{scorer_name}_recall": recalls[i],
                f"{scorer_name}_normalized": normalized[i],
                f"{scorer_name}_diffs": diffs[i],
                f"{scorer_name}_expdiffs": expdiffs[i],
            }
        )
    return results
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
def compute_bart_score(predictions, references, scorers):
    """Run every scorer over the corpus and merge the per-prediction metrics.

    Args:
        predictions: list of predicted strings.
        references: list of reference strings, parallel to ``predictions``.
        scorers: mapping of scorer name -> scorer (see ``get_scorers``).

    Returns:
        A list with one dict per prediction, containing the combined
        metrics from all scorers.
    """
    merged = [{} for _ in predictions]
    for scorer_name, scorer in scorers.items():
        per_scorer = compute_bart_score_for_scorer(predictions, references, scorer_name, scorer)
        # Fold this scorer's metrics into the accumulated dict for each example.
        for combined, partial in zip(merged, per_scorer):
            combined.update(partial)
    return merged
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
metrics/bleu.py
DELETED
|
@@ -1,47 +0,0 @@
|
|
| 1 |
-
# Copied from https://github.com/huggingface/datasets/blob/76bb45964df1e62d1411b0a9e9fc673e9a791c9a/metrics/sacrebleu/sacrebleu.py
|
| 2 |
-
|
| 3 |
-
from copy import deepcopy
|
| 4 |
-
from sacrebleu.metrics import BLEU
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
def compute_bleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    tokenize=None,
    effective_order=False,
):
    """Compute corpus-level SacreBLEU.

    Adapted from
    https://github.com/huggingface/datasets/blob/76bb45964df1e62d1411b0a9e9fc673e9a791c9a/metrics/sacrebleu/sacrebleu.py

    Args:
        predictions: list of translated sentences.
        references: list of lists of reference sentences, one inner list per
            prediction; the inner lists may have different lengths.
        smooth_method, smooth_value, force, lowercase, tokenize,
        effective_order: passed through to ``sacrebleu.metrics.BLEU``.

    Returns:
        dict with the BLEU ``score``, per-order ``counts-i`` / ``totals-i`` /
        ``precision-i`` entries, brevity penalty ``bp``, ``sys_len`` and
        ``ref_len``.
    """
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        # Ragged reference lists: pad every entry with None up to the maximum
        # count (sacrebleu treats None references as missing).
        references = deepcopy(references)
        # Bug fix: the transpose below must cover the padded maximum number of
        # references. The original kept len(references[0]) from before padding,
        # silently dropping references whenever the first prediction had fewer
        # references than another one.
        references_per_prediction = max(len(refs) for refs in references)
        for refs in references:
            refs.extend([None] * (references_per_prediction - len(refs)))

    # sacrebleu expects references transposed: one list per reference slot,
    # each containing that slot's reference for every prediction.
    transformed_references = [
        [refs[i] for refs in references] for i in range(references_per_prediction)
    ]

    bleu = BLEU(
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        effective_order=effective_order,
        # BLEU's tokenizer default is a sentinel, not None, so only forward
        # `tokenize` when the caller actually supplied one.
        **(dict(tokenize=tokenize) if tokenize else {}),
    )
    output = bleu.corpus_score(
        predictions,
        transformed_references,
    )
    output_dict = {
        "score": output.score,
        **{f"counts-{i+1}": round(p, 4) for i, p in enumerate(output.counts)},
        **{f"totals-{i+1}": round(p, 4) for i, p in enumerate(output.totals)},
        **{f"precision-{i+1}": round(p, 4) for i, p in enumerate(output.precisions)},
        "bp": output.bp,
        "sys_len": output.sys_len,
        "ref_len": output.ref_len,
    }
    return output_dict
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|