Make sure evaluation metrics are computed correctly
Browse files
app.py
CHANGED
|
@@ -87,8 +87,11 @@ sentences_labels, sentences_predictions = [], []
 for sample in tqdm(dataset):
     text = sample["sentence"]
     labels = [
-        1
-        [second removed line not legible in this rendering — verify against the repository]
+        1
+        if DIALECTS_WITH_LABELS[i] in sample.keys()
+        and int(sample[DIALECTS_WITH_LABELS[i]]) == 1
+        else 0
+        for i in range(len(DIALECTS_WITH_LABELS))
     ]
     pred = predict_top_p(text)
     sentences_labels.append(labels)