Create training/utils.py
training/utils.py +29 -0
training/utils.py
ADDED
@@ -0,0 +1,29 @@
```python
from typing import List, Dict

import numpy as np
import evaluate


def compute_metrics_sentiment(eval_pred):
    # eval_pred is the (logits, labels) tuple the HF Trainer passes in.
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    acc = (preds == labels).mean().item()
    return {"accuracy": acc}


def compute_metrics_ner(eval_pred, label_list: List[str]):
    seqeval = evaluate.load("seqeval")
    logits, labels = eval_pred
    preds = logits.argmax(-1)
    # Drop positions labeled -100 (special tokens / non-first subword pieces,
    # which the loss also ignores), then map label ids back to tag strings.
    true_preds = [
        [label_list[p] for (p, l) in zip(pred, lab) if l != -100]
        for pred, lab in zip(preds, labels)
    ]
    true_labels = [
        [label_list[l] for (p, l) in zip(pred, lab) if l != -100]
        for pred, lab in zip(preds, labels)
    ]
    results = seqeval.compute(predictions=true_preds, references=true_labels)
    return {
        "precision": results["overall_precision"],
        "recall": results["overall_recall"],
        "f1": results["overall_f1"],
        "accuracy": results["overall_accuracy"],
    }
```
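For context, a minimal sketch of how these helpers would typically be wired into a Hugging Face `Trainer`. The model and dataset names here are placeholders, not part of this commit. Since `Trainer` calls `compute_metrics` with a single `eval_pred` argument, the NER variant needs its `label_list` bound first, e.g. with `functools.partial`:

```python
# Hypothetical wiring -- model/dataset names are assumptions, not from this repo.
from functools import partial

from transformers import Trainer, TrainingArguments

from training.utils import compute_metrics_ner, compute_metrics_sentiment

# Sentiment: the function already matches the signature Trainer expects.
trainer = Trainer(
    model=model,                      # assumed: a sequence-classification model
    args=TrainingArguments(output_dir="out"),
    train_dataset=train_ds,           # assumed: pre-tokenized datasets
    eval_dataset=eval_ds,
    compute_metrics=compute_metrics_sentiment,
)

# NER: bind label_list so Trainer can call it with eval_pred alone.
ner_trainer = Trainer(
    model=ner_model,                  # assumed: a token-classification model
    args=TrainingArguments(output_dir="out-ner"),
    train_dataset=ner_train_ds,
    eval_dataset=ner_eval_ds,
    compute_metrics=partial(compute_metrics_ner, label_list=label_list),
)
```

One design note: `evaluate.load("seqeval")` runs inside `compute_metrics_ner`, so the metric is re-fetched on every evaluation pass; hoisting the load to module level would avoid the repeated lookup, at the cost of doing it at import time.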