from datasets import Metric
class CUEBenchMetric(Metric):
    """Ranking metrics for multi-label set prediction in CUEBench.

    Computes Mean Reciprocal Rank, Hits@K, and Coverage@K (K in 1, 3, 5, 10)
    over a list of model outputs, after normalizing the predicted label
    strings with :meth:`_clean`. All label comparison is case-insensitive.
    """

    # Cut-off ranks shared by Hits@K and Coverage@K.
    _KS = (1, 3, 5, 10)

    def _info(self):
        # NOTE(review): `datasets.Metric._info` conventionally returns a
        # MetricInfo object; this returns a plain dict, and the description
        # mentions F1/P/R although only MRR/Hits/Coverage are computed —
        # confirm against downstream consumers before changing.
        return {
            "description": "F1, Precision, and Recall for multi-label set prediction in CUEBench",
            "inputs_description": "List of predicted and reference class sets",
            "citation": "",
        }

    def _MeanReciprocalRank(self, predicted, target):
        """Return 1/rank (1-based) of the first predicted label present in
        `target`, case-insensitively; 0 on no hit or if either list is empty.
        """
        if not predicted or not target:
            return 0
        # Build the gold set once for O(1) membership tests.
        gold = {str(t).lower() for t in target}
        for rank, label in enumerate(str(p).lower() for p in predicted):
            if label in gold:
                return 1 / (rank + 1)
        return 0

    def _Hits_at_K(self, predicted, target, k):
        """Return the number of top-k predicted labels that appear in
        `target` (case-insensitive); 0 if either list is empty.
        """
        if not predicted or not target:
            return 0
        gold = {str(t).lower() for t in target}
        return sum(1 for p in predicted[:k] if str(p).lower() in gold)

    def _coverage(self, _pd_Res, _eGold, _scores=None):
        """
        Evaluate predictions (_pd_Res) against gold labels (_eGold).

        Matching is case-insensitive, consistent with _MeanReciprocalRank
        and _Hits_at_K (the original compared raw strings here, silently
        underreporting coverage on case mismatches).

        Optionally, pass _scores (same length as _pd_Res) to track
        prediction scores in the returned pair.

        Returns:
            res: dict mapping k in (1, 3, 5, 10) to |top-k ∩ gold| / k.
                Note the denominator is k even when len(_eGold) < k, so
                cov@k cannot reach 1.0 for k > |gold| — preserved as-is.
            l_gold_pred: (_eGold, (top_k_labels, top_scores)) captured at
                the smallest k >= len(_eGold) (per the documented intent;
                the original kept overwriting and returned the largest k),
                or () when there is no overlap / empty input.
        """
        zeros = {k: 0 for k in self._KS}
        if not _pd_Res or not _eGold:
            return zeros, ()
        # Lowercase once; keep the originals for the returned labels.
        preds_lc = [str(p).lower() for p in _pd_Res]
        gold_lc = {str(g).lower() for g in _eGold}
        if not gold_lc.intersection(preds_lc):
            # No predicted label is correct at any rank.
            return zeros, ()
        res = {}
        l_gold_pred = ()
        for k in self._KS:
            overlap = gold_lc & set(preds_lc[:k])
            res[k] = len(overlap) / k
            if not l_gold_pred and k >= len(_eGold):
                # NOTE: `if _scores` (not `is not None`) preserved — an
                # empty score list also yields None, as in the original.
                top_scores = _scores[:k] if _scores else None
                l_gold_pred = (_eGold, (_pd_Res[:k], top_scores))
        return res, l_gold_pred

    def _clean(self, strings):
        """Normalize raw predicted label strings.

        Per string: drop all asterisks and outer whitespace, strip one pair
        of matching surrounding quotes, remove square brackets, keep only
        the text after the last colon (models often emit "label: value"),
        then trim residual spaces/underscores/backslashes/quotes.
        """
        cleaned = []
        for s in strings:
            s = s.replace('*', '').strip()
            if (s.startswith("'") and s.endswith("'")) or (s.startswith('"') and s.endswith('"')):
                s = s[1:-1]
            s = s.replace('[', '').replace(']', '')
            if ':' in s:
                s = s.split(':')[-1]
            s = s.strip(' _\\"\'')
            cleaned.append(s)
        return cleaned

    def _compute(self, outputs):
        """Aggregate MRR, Hits@K, and Coverage@K over `outputs`.

        Args:
            outputs: list of dicts, each with 'predicted_classes' and
                'target_classes' (lists of label strings). The caller's
                dicts are NOT mutated (the original overwrote
                'predicted_classes' in place with the cleaned labels).

        Returns:
            dict with keys "average_mrr", "hits_at_{1,3,5,10}",
            "coverage_at_{1,3,5,10}", each a float averaged over outputs.

        Raises:
            ValueError: if `outputs` is empty (instead of an opaque
                ZeroDivisionError).
        """
        if not outputs:
            raise ValueError("outputs must be a non-empty list")
        n = len(outputs)
        # Clean predictions once, into locals, without touching the input.
        pairs = [
            (self._clean(o['predicted_classes']), o['target_classes'])
            for o in outputs
        ]
        average_mrr = sum(self._MeanReciprocalRank(p, t) for p, t in pairs) / n
        hits = {k: 0 for k in self._KS}
        cov = {k: 0.0 for k in self._KS}
        for p, t in pairs:
            res, _ = self._coverage(p, t)
            for k in self._KS:
                # Hits@K is binary per example: at least one correct in top-k.
                if self._Hits_at_K(p, t, k) > 0:
                    hits[k] += 1
                cov[k] += res[k]
        return {
            "average_mrr": average_mrr,
            "hits_at_1": hits[1] / n,
            "hits_at_3": hits[3] / n,
            "hits_at_5": hits[5] / n,
            "hits_at_10": hits[10] / n,
            "coverage_at_1": cov[1] / n,
            "coverage_at_3": cov[3] / n,
            "coverage_at_5": cov[5] / n,
            "coverage_at_10": cov[10] / n,
        }