AMR-KELEG committed on
Commit
e61309d
·
1 Parent(s): 55afb0a

Make sure evaluation metrics are computed correctly

Browse files
Files changed (1) hide show
  1. app.py +5 -2
app.py CHANGED
@@ -87,8 +87,11 @@ sentences_labels, sentences_predictions = [], []
87
  for sample in tqdm(dataset):
88
  text = sample["sentence"]
89
  labels = [
90
- 1 if DIALECTS[i] in sample.keys() and int(sample[DIALECTS[i]]) == 1 else 0
91
- for i in range(len(DIALECTS))
 
 
 
92
  ]
93
  pred = predict_top_p(text)
94
  sentences_labels.append(labels)
 
87
  for sample in tqdm(dataset):
88
  text = sample["sentence"]
89
  labels = [
90
+ 1
91
+ if DIALECTS_WITH_LABELS[i] in sample.keys()
92
+ and int(sample[DIALECTS_WITH_LABELS[i]]) == 1
93
+ else 0
94
+ for i in range(len(DIALECTS_WITH_LABELS))
95
  ]
96
  pred = predict_top_p(text)
97
  sentences_labels.append(labels)