Fix bugs in metrics

#19
by tytskiy - opened
benchmarks/yambda/evaluation/metrics.py CHANGED
@@ -47,7 +47,8 @@ class Recall(Metric):
47
  num_positives = targets.lengths.to(torch.float32)
48
  num_positives[num_positives == 0] = torch.inf
49
 
50
- values[k] = target_mask[:, :k].to(torch.float32).sum(dim=-1) / num_positives
 
51
 
52
  values[k] = torch.mean(values[k]).item()
53
 
@@ -134,16 +135,39 @@ class NDCG(Metric):
134
  def __call__(
135
  self, ranked: Ranked | None, targets: Targets, target_mask: torch.Tensor, ks: Iterable[int]
136
  ) -> dict[int, float]:
137
- actual_dcg = DCG()(ranked, targets, target_mask, ks)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
138
 
139
  ideal_target_mask = (
140
  torch.arange(target_mask.shape[1], device=targets.device)[None, :] < targets.lengths[:, None]
141
  ).to(torch.float32)
142
  assert target_mask.shape == ideal_target_mask.shape
143
 
144
- ideal_dcg = DCG()(ranked, targets, ideal_target_mask, ks)
 
 
 
 
 
145
 
146
- ndcg_values = {k: (actual_dcg[k] / ideal_dcg[k] if ideal_dcg[k] != 0 else 0.0) for k in ks}
147
 
148
  return ndcg_values
149
 
 
47
  num_positives = targets.lengths.to(torch.float32)
48
  num_positives[num_positives == 0] = torch.inf
49
 
50
+ # there was a bug: we divided by num_positives instead of min(num_positives, k)
51
+ values[k] = target_mask[:, :k].to(torch.float32).sum(dim=-1) / torch.clamp(num_positives, max=k)
52
 
53
  values[k] = torch.mean(values[k]).item()
54
 
 
135
  def __call__(
136
  self, ranked: Ranked | None, targets: Targets, target_mask: torch.Tensor, ks: Iterable[int]
137
  ) -> dict[int, float]:
138
+
139
+ # there was a bug: we computed (dcg_1 + ... + dcg_n) / (idcg_1 + ... + idcg_n)
140
+ # instead of (1 / n) * (dcg_1 / idcg_1 + ... + dcg_n / idcg_n)
141
+
142
+ assert all(0 < k <= target_mask.shape[1] for k in ks)
143
+
144
+ def calc_dcg(target_mask: torch.Tensor) -> dict[int, torch.Tensor]:
145
+ values = {}
146
+
147
+ discounts = 1.0 / torch.log2(
148
+ torch.arange(2, target_mask.shape[1] + 2, device=target_mask.device, dtype=torch.float32)
149
+ )
150
+
151
+ for k in ks:
152
+ dcg_k = torch.sum(target_mask[:, :k] * discounts[:k], dim=1)
153
+ values[k] = dcg_k
154
+ return values
155
+
156
+ actual_dcg = calc_dcg(target_mask)
157
 
158
  ideal_target_mask = (
159
  torch.arange(target_mask.shape[1], device=targets.device)[None, :] < targets.lengths[:, None]
160
  ).to(torch.float32)
161
  assert target_mask.shape == ideal_target_mask.shape
162
 
163
+ ideal_dcg = calc_dcg(target_mask)
164
+
165
+ def divide(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
166
+ assert x.shape == y.shape
167
+ assert x.shape[0] == target_mask.shape[0]
168
+ return torch.where(y == 0, 0, x / y).mean()
169
 
170
+ ndcg_values = {k: divide(actual_dcg[k], ideal_dcg[k]).item() for k in ks}
171
 
172
  return ndcg_values
173