Pull request #20: "comments about bugs in metric calculation" — opened by tytskiy
File changed: benchmarks/yambda/evaluation/metrics.py
|
@@ -47,6 +47,9 @@ class Recall(Metric):
|
|
| 47 |
num_positives = targets.lengths.to(torch.float32)
|
| 48 |
num_positives[num_positives == 0] = torch.inf
|
| 49 |
|
|
|
|
|
|
|
|
|
|
| 50 |
values[k] = target_mask[:, :k].to(torch.float32).sum(dim=-1) / num_positives
|
| 51 |
|
| 52 |
values[k] = torch.mean(values[k]).item()
|
|
@@ -134,6 +137,12 @@ class NDCG(Metric):
|
|
| 134 |
def __call__(
|
| 135 |
self, ranked: Ranked | None, targets: Targets, target_mask: torch.Tensor, ks: Iterable[int]
|
| 136 |
) -> dict[int, float]:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 137 |
actual_dcg = DCG()(ranked, targets, target_mask, ks)
|
| 138 |
|
| 139 |
ideal_target_mask = (
|
|
|
|
| 47 |
num_positives = targets.lengths.to(torch.float32)
|
| 48 |
num_positives[num_positives == 0] = torch.inf
|
| 49 |
|
| 50 | + # there is a bug: we divide by num_positives instead of max(num_positives, k)
| 51 | + # this may slightly affect the absolute metric values,
| 52 | + # but as far as we can judge it does not change the ranking of the models reported in the paper.
| 53 |
values[k] = target_mask[:, :k].to(torch.float32).sum(dim=-1) / num_positives
|
| 54 |
|
| 55 |
values[k] = torch.mean(values[k]).item()
|
|
|
|
| 137 |
def __call__(
|
| 138 |
self, ranked: Ranked | None, targets: Targets, target_mask: torch.Tensor, ks: Iterable[int]
|
| 139 |
) -> dict[int, float]:
|
| 140 | +
| 141 | + # there is a bug: we compute (dcg_1 + ... + dcg_n) / (idcg_1 + ... + idcg_n)
| 142 | + # instead of (1 / n) * (dcg_1 / idcg_1 + ... + dcg_n / idcg_n)
| 143 | + # this may affect the absolute metric values,
| 144 | + # but as far as we can judge it does not change the ranking of the models reported in the paper.
| 145 | +
| 146 |
actual_dcg = DCG()(ranked, targets, target_mask, ks)
|
| 147 |
|
| 148 |
ideal_target_mask = (
|