Fix bugs in metrics

#21
by tytskiy - opened
benchmarks/yambda/evaluation/metrics.py

```diff
@@ -47,7 +47,10 @@ class Recall(Metric):
         num_positives = targets.lengths.to(torch.float32)
         num_positives[num_positives == 0] = torch.inf
 
-        values[k] = target_mask[:, :k].to(torch.float32).sum(dim=-1) / num_positives
+        # there was a bug: we divided by num_positives instead of min(num_positives, k)
+        # this may have slightly affected the absolute metric values,
+        # but as far as we can judge it didn't change the ranking of the models reported in the paper.
+        values[k] = target_mask[:, :k].to(torch.float32).sum(dim=-1) / torch.clamp(num_positives, max=k)
 
         values[k] = torch.mean(values[k]).item()
 
```
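For intuition, here is a minimal self-contained sketch of what the clamp changes (toy numbers, independent of the benchmark's `Metric` classes): with 5 relevant items and k=3, even a perfect top-3 list could only reach recall 3/5 = 0.6 under the old denominator, while the clamped denominator min(5, 3) = 3 lets it reach 1.0.

```python
import torch

# One hypothetical user: 5 relevant items overall, and all 3 of the
# top-3 recommendations are hits.
target_mask = torch.tensor([[1.0, 1.0, 1.0]])  # hit indicators in the top-k slots
num_positives = torch.tensor([5.0])
k = 3

hits = target_mask[:, :k].sum(dim=-1)

old_recall = hits / num_positives                      # 3 / 5 = 0.6
new_recall = hits / torch.clamp(num_positives, max=k)  # 3 / min(5, 3) = 1.0

print(old_recall.item(), new_recall.item())  # 0.6 1.0
```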
 
 
```diff
@@ -134,16 +137,41 @@ class NDCG(Metric):
     def __call__(
         self, ranked: Ranked | None, targets: Targets, target_mask: torch.Tensor, ks: Iterable[int]
     ) -> dict[int, float]:
-        actual_dcg = DCG()(ranked, targets, target_mask, ks)
+
+        # there was a bug: we computed (dcg_1 + ... + dcg_n) / (idcg_1 + ... + idcg_n)
+        # instead of (1 / n) * (dcg_1 / idcg_1 + ... + dcg_n / idcg_n)
+        # this may have affected the absolute metric values,
+        # but as far as we can judge it didn't change the ranking of the models reported in the paper.
+
+        assert all(0 < k <= target_mask.shape[1] for k in ks)
+
+        def calc_dcg(target_mask: torch.Tensor) -> dict[int, torch.Tensor]:
+            values = {}
+
+            discounts = 1.0 / torch.log2(
+                torch.arange(2, target_mask.shape[1] + 2, device=target_mask.device, dtype=torch.float32)
+            )
+
+            for k in ks:
+                dcg_k = torch.sum(target_mask[:, :k] * discounts[:k], dim=1)
+                values[k] = dcg_k
+            return values
+
+        actual_dcg = calc_dcg(target_mask)
 
         ideal_target_mask = (
             torch.arange(target_mask.shape[1], device=targets.device)[None, :] < targets.lengths[:, None]
         ).to(torch.float32)
         assert target_mask.shape == ideal_target_mask.shape
 
-        ideal_dcg = DCG()(ranked, targets, ideal_target_mask, ks)
+        ideal_dcg = calc_dcg(ideal_target_mask)
+
+        def divide(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
+            assert x.shape == y.shape
+            assert x.shape[0] == target_mask.shape[0]
+            return torch.where(y == 0, 0, x / y).mean()
 
-        ndcg_values = {k: (actual_dcg[k] / ideal_dcg[k] if ideal_dcg[k] != 0 else 0.0) for k in ks}
+        ndcg_values = {k: divide(actual_dcg[k], ideal_dcg[k]).item() for k in ks}
 
         return ndcg_values
 
```
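To see why the aggregation order matters, a small standalone illustration with made-up per-user DCG/IDCG values (not tied to the patched `calc_dcg`): a user with a large ideal DCG dominates the ratio of sums, whereas the mean of per-user ratios weights every user equally.

```python
import torch

# Two hypothetical users: user A gets a perfect ranking (DCG == IDCG),
# user B recovers only a quarter of their ideal DCG.
dcg = torch.tensor([1.0, 1.0])
idcg = torch.tensor([1.0, 4.0])

# Old (buggy) aggregation: one global ratio of sums; user B dominates
# because their IDCG is larger.
ratio_of_sums = dcg.sum() / idcg.sum()  # 2 / 5 = 0.4

# Fixed aggregation: per-user NDCG averaged over users (users with
# IDCG == 0 are mapped to 0, mirroring the patch's divide helper).
mean_of_ratios = torch.where(idcg == 0, 0, dcg / idcg).mean()  # (1 + 0.25) / 2 = 0.625

print(ratio_of_sums.item(), mean_of_ratios.item())  # 0.4 0.625
```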
 
 
```diff
@@ -201,4 +229,4 @@ def calc_metrics(ranked: Ranked, targets: Targets, metrics: list[str]) -> dict[s
     for name, ks in grouped_metrics.items():
         result[name] = REGISTERED_METRIC_FN[name](ranked, targets, target_mask, ks=ks)
 
-    return result
+    return result
```