hotchpotch committed
Commit 3b639dd · verified · 1 Parent(s): 6dde7e0

Add NanoCodeSearchNet eval script

Files changed (1): nano_code_search_net_eval.py +393 -0
nano_code_search_net_eval.py ADDED
@@ -0,0 +1,393 @@
#!/usr/bin/env python3
"""Evaluate a SentenceTransformer model on NanoCodeSearchNet (NDCG@10).

This mirrors the NanoBEIR evaluation style from sentence-transformers, adapted to
hotchpotch/NanoCodeSearchNet's layout (configs: corpus/queries/qrels, splits: NanoCodeSearchNet{Lang}).
"""

from __future__ import annotations

import argparse
import json
import logging
import time
from collections.abc import Callable, Sequence
from typing import Any, cast

import numpy as np
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.util import is_datasets_available
from torch import Tensor
from tqdm import tqdm

DATASET_ID = "hotchpotch/NanoCodeSearchNet"

LANGS = ["Go", "Java", "JavaScript", "PHP", "Python", "Ruby"]
_LANGS_BY_LOWER = {name.lower(): name for name in LANGS}
ALIASES = {
    "js": "JavaScript",
    "py": "Python",
}

logger = logging.getLogger(__name__)


def _normalize_lang(name: str) -> str:
    key = name.lower()
    key = ALIASES.get(key, key)
    return _LANGS_BY_LOWER.get(key, name)


def _split_name(lang: str) -> str:
    return f"NanoCodeSearchNet{lang}"


def _human_readable(lang: str) -> str:
    return f"NanoCodeSearchNet-{lang}"


class NanoCodeSearchNetEvaluator(SentenceEvaluator):
    """Evaluate a model on NanoCodeSearchNet across languages."""

    information_retrieval_class = InformationRetrievalEvaluator

    def __init__(
        self,
        dataset_names: list[str] | None = None,
        dataset_id: str = DATASET_ID,
        mrr_at_k: list[int] | None = None,
        ndcg_at_k: list[int] | None = None,
        accuracy_at_k: list[int] | None = None,
        precision_recall_at_k: list[int] | None = None,
        map_at_k: list[int] | None = None,
        show_progress_bar: bool = False,
        batch_size: int = 32,
        write_csv: bool = True,
        truncate_dim: int | None = None,
        score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] | None = None,
        main_score_function: str | SimilarityFunction | None = None,
        aggregate_fn: Callable[[list[float]], float] = np.mean,
        aggregate_key: str = "mean",
        query_prompts: str | dict[str, str] | None = None,
        corpus_prompts: str | dict[str, str] | None = None,
        write_predictions: bool = False,
        ndcg_only: bool = True,
    ) -> None:
        super().__init__()

        if dataset_names is None:
            dataset_names = LANGS
        self.dataset_names = [_normalize_lang(name) for name in dataset_names]
        self.dataset_id = dataset_id
        self.aggregate_fn = aggregate_fn
        self.aggregate_key = aggregate_key
        self.write_csv = write_csv

        self.query_prompts = self._normalize_prompts(query_prompts)
        self.corpus_prompts = self._normalize_prompts(corpus_prompts)

        self.show_progress_bar = show_progress_bar
        self.score_functions = score_functions or {}
        self.score_function_names = sorted(self.score_functions.keys())
        self.main_score_function = main_score_function
        self.truncate_dim = truncate_dim
        self.name = f"NanoCodeSearchNet_{aggregate_key}"
        if self.truncate_dim:
            self.name += f"_{self.truncate_dim}"

        self.ndcg_only = ndcg_only
        self.mrr_at_k = mrr_at_k or [10]
        self.ndcg_at_k = ndcg_at_k or [10]
        if ndcg_only:
            self.accuracy_at_k = [10]
            self.precision_recall_at_k = [10]
            self.map_at_k = [10]
        else:
            self.accuracy_at_k = accuracy_at_k or [1, 3, 5, 10]
            self.precision_recall_at_k = precision_recall_at_k or [1, 3, 5, 10]
            self.map_at_k = map_at_k or [100]

        self._validate_dataset_names()
        self._validate_prompts()

        ir_kwargs = {
            "mrr_at_k": self.mrr_at_k,
            "ndcg_at_k": self.ndcg_at_k,
            "accuracy_at_k": self.accuracy_at_k,
            "precision_recall_at_k": self.precision_recall_at_k,
            "map_at_k": self.map_at_k,
            "show_progress_bar": show_progress_bar,
            "batch_size": batch_size,
            "write_csv": write_csv,
            "truncate_dim": truncate_dim,
            "score_functions": score_functions,
            "main_score_function": main_score_function,
            "write_predictions": write_predictions,
        }

        self.evaluators = [
            self._load_dataset(name, **ir_kwargs)
            for name in tqdm(self.dataset_names, desc="Loading NanoCodeSearchNet", leave=False)
        ]

        self.csv_file = f"NanoCodeSearchNet_evaluation_{aggregate_key}_results.csv"
        self.csv_headers = ["epoch", "steps"]
        self._append_csv_headers(self.score_function_names)

    def _normalize_prompts(self, prompts: str | dict[str, str] | None) -> dict[str, str] | None:
        if prompts is None:
            return None
        if isinstance(prompts, str):
            return {name: prompts for name in self.dataset_names}
        normalized: dict[str, str] = {}
        for key, value in prompts.items():
            normalized[_normalize_lang(key)] = value
        return normalized

    def _append_csv_headers(self, score_function_names):
        for score_name in score_function_names:
            for k in self.accuracy_at_k:
                self.csv_headers.append(f"{score_name}-Accuracy@{k}")
            for k in self.precision_recall_at_k:
                self.csv_headers.append(f"{score_name}-Precision@{k}")
                self.csv_headers.append(f"{score_name}-Recall@{k}")
            for k in self.mrr_at_k:
                self.csv_headers.append(f"{score_name}-MRR@{k}")
            for k in self.ndcg_at_k:
                self.csv_headers.append(f"{score_name}-NDCG@{k}")
            for k in self.map_at_k:
                self.csv_headers.append(f"{score_name}-MAP@{k}")

    def _load_dataset(self, lang: str, **ir_kwargs) -> InformationRetrievalEvaluator:
        if not is_datasets_available():
            raise ValueError("datasets is required; install via `pip install datasets`.")

        from datasets import load_dataset

        split_name = _split_name(lang)
        t0 = time.perf_counter()
        corpus_ds = load_dataset(self.dataset_id, "corpus", split=split_name)
        queries_ds = load_dataset(self.dataset_id, "queries", split=split_name)
        qrels_ds = load_dataset(self.dataset_id, "qrels", split=split_name)
        logger.info("[NanoCodeSearchNet] loaded datasets for %s in %.2fs", lang, time.perf_counter() - t0)

        corpus_dict = {}
        t1 = time.perf_counter()
        for sample in corpus_ds:
            row = cast(dict[str, Any], sample)
            text = row.get("text")
            if text:
                corpus_dict[row["_id"]] = text

        queries_dict = {}
        for sample in queries_ds:
            row = cast(dict[str, Any], sample)
            text = row.get("text")
            if text:
                queries_dict[row["_id"]] = text

        qrels_dict: dict[str, set[str]] = {}
        for sample in qrels_ds:
            row = cast(dict[str, Any], sample)
            qid = row["query-id"]
            cids = row["corpus-id"]
            if isinstance(cids, list):
                qrels_dict.setdefault(qid, set()).update(cids)
            else:
                qrels_dict.setdefault(qid, set()).add(cids)

        logger.info(
            "[NanoCodeSearchNet] materialized dicts for %s in %.2fs (corpus=%d, queries=%d, qrels=%d)",
            lang,
            time.perf_counter() - t1,
            len(corpus_dict),
            len(queries_dict),
            len(qrels_dict),
        )

        if self.query_prompts is not None:
            ir_kwargs["query_prompt"] = self.query_prompts.get(lang, None)
        if self.corpus_prompts is not None:
            ir_kwargs["corpus_prompt"] = self.corpus_prompts.get(lang, None)

        evaluator = InformationRetrievalEvaluator(
            queries_dict,
            corpus_dict,
            qrels_dict,
            name=_split_name(lang),
            **ir_kwargs,
        )
        return evaluator

    def _validate_dataset_names(self) -> None:
        valid = set(LANGS)
        missing = [name for name in self.dataset_names if name not in valid]
        if missing:
            raise ValueError(f"Invalid language(s): {missing}. Valid: {sorted(valid)}")

    def _validate_prompts(self) -> None:
        error_msg = ""
        if self.query_prompts is not None:
            missing = [lang for lang in self.dataset_names if lang not in self.query_prompts]
            if missing:
                error_msg += f"Missing query prompts for: {missing}\n"
        if self.corpus_prompts is not None:
            missing = [lang for lang in self.dataset_names if lang not in self.corpus_prompts]
            if missing:
                error_msg += f"Missing corpus prompts for: {missing}\n"
        if error_msg:
            raise ValueError(error_msg.strip())

    def __call__(
        self,
        model: SentenceTransformer,
        output_path: str | None = None,
        epoch: int = -1,
        steps: int = -1,
        *args,
        **kwargs,
    ) -> dict[str, float]:
        per_metric_agg: dict[str, list[float]] = {}
        per_dataset: dict[str, float] = {}

        # __init__ stores `score_functions or {}`, so test for emptiness rather
        # than None; otherwise the model-similarity fallback never triggers.
        if not self.score_functions:
            self.score_functions = {model.similarity_fn_name: model.similarity}
            self.score_function_names = [model.similarity_fn_name]
            self._append_csv_headers(self.score_function_names)

        for evaluator in tqdm(self.evaluators, desc="Evaluating NanoCodeSearchNet", disable=not self.show_progress_bar):
            logger.info("Evaluating %s", evaluator.name)
            results = evaluator(model, output_path, epoch, steps)
            for key, value in results.items():
                per_dataset[key] = value

                if "_" in key:
                    _, metric_name = key.split("_", 1)
                else:
                    metric_name = key
                per_metric_agg.setdefault(metric_name, []).append(value)

        agg_results = {
            f"{self.name}_{metric}": self.aggregate_fn(vals)
            for metric, vals in per_metric_agg.items()
        }

        if not self.primary_metric:
            main_score_fn = self.main_score_function
            main = None if main_score_fn is None else str(main_score_fn)
            ndcg_target = f"ndcg@{max(self.ndcg_at_k)}"
            candidates = [k for k in agg_results if k.endswith(ndcg_target)]
            if main:
                preferred = [k for k in candidates if main in k]
                if preferred:
                    self.primary_metric = preferred[0]
            if not self.primary_metric and candidates:
                self.primary_metric = candidates[0]

        if self.primary_metric and self.primary_metric in agg_results:
            logger.info("Primary %s: %.4f", self.primary_metric, agg_results[self.primary_metric])

        per_dataset.update(agg_results)
        if self.ndcg_only:
            per_dataset = {k: v for k, v in per_dataset.items() if "ndcg@10" in k}
        return per_dataset


def parse_args(argv: Sequence[str] | None = None) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Evaluate a model on NanoCodeSearchNet")
    parser.add_argument("--model-path", required=True, help="Path or HF id for SentenceTransformer model")
    parser.add_argument("--langs", nargs="*", default=None, help="Languages (default: all)")
    parser.add_argument("--batch-size", type=int, default=128, help="Eval batch size")
    parser.add_argument("--output", default=None, help="Optional JSON output path for metrics")
    parser.add_argument("--show-progress", action="store_true", help="Show per-language tqdm during eval")
    parser.add_argument(
        "--no-autocast",
        action="store_true",
        help="Disable torch.autocast (default: enabled on CUDA with bf16 if available)",
    )
    parser.add_argument(
        "--autocast-dtype",
        choices=["bf16", "fp16"],
        default="bf16",
        help="autocast dtype (bf16 or fp16)",
    )
    parser.add_argument("--query-prompt", default=None, help="Prefix applied to queries")
    parser.add_argument("--corpus-prompt", default=None, help="Prefix applied to corpus/passages")
    parser.add_argument(
        "--all-metrics",
        action="store_true",
        help="Return all metrics (default: ndcg@10 only)",
    )
    parser.add_argument(
        "--trust-remote-code",
        action="store_true",
        help="Pass trust_remote_code=True to SentenceTransformer (needed for some HF models)",
    )
    return parser.parse_args(argv)


def main(argv: Sequence[str] | None = None) -> None:
    # Forward argv so main() can also be invoked programmatically.
    args = parse_args(argv)
    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")

    langs = args.langs or LANGS

    model = SentenceTransformer(args.model_path, prompts=None, trust_remote_code=args.trust_remote_code)
    model.eval()

    evaluator = NanoCodeSearchNetEvaluator(
        dataset_names=langs,
        batch_size=args.batch_size,
        show_progress_bar=args.show_progress,
        write_csv=False,
        query_prompts=args.query_prompt if args.query_prompt else None,
        corpus_prompts=args.corpus_prompt if args.corpus_prompt else None,
        ndcg_only=not args.all_metrics,
    )

    use_autocast = not args.no_autocast
    autocast_dtype = {"bf16": "bfloat16", "fp16": "float16"}[args.autocast_dtype]
    autocast_ctx = None
    if use_autocast:
        import torch

        device_type = "cuda" if torch.cuda.is_available() else "cpu"
        autocast_ctx = torch.autocast(device_type=device_type, dtype=getattr(torch, autocast_dtype))

    if autocast_ctx:
        with autocast_ctx:
            results = evaluator(model)
    else:
        results = evaluator(model)

    score_fn = model.similarity_fn_name
    ndcg_key_suffix = f"{score_fn}_ndcg@10"

    per_lang = {}
    for lang in evaluator.dataset_names:
        key = f"{_split_name(lang)}_{ndcg_key_suffix}"
        if key in results:
            per_lang[lang] = results[key]

    avg = float(np.mean(list(per_lang.values()))) if per_lang else float("nan")

    print("NanoCodeSearchNet Evaluation (NDCG@10)")
    print(f"Model: {args.model_path}")
    for lang in evaluator.dataset_names:
        val = per_lang.get(lang)
        if val is None:
            continue
        print(f"{_split_name(lang)}_{ndcg_key_suffix}: {val:.4f}")
    print(f"NanoCodeSearchNet_mean_{ndcg_key_suffix}: {avg:.4f}")

    if args.output:
        payload = {"model": args.model_path, "avg": avg, "per_lang": per_lang, "metrics": results}
        with open(args.output, "w", encoding="utf-8") as f:
            json.dump(payload, f, ensure_ascii=False, indent=2)


if __name__ == "__main__":
    main()
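
For reference, a minimal usage sketch of the evaluator class (not part of the commit; the model id below is illustrative, and the import assumes the script above is on the Python path as nano_code_search_net_eval.py):

# Programmatic use, mirroring what main() does minus the autocast handling.
from sentence_transformers import SentenceTransformer

from nano_code_search_net_eval import NanoCodeSearchNetEvaluator

# Any SentenceTransformer-compatible model id works here.
model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")

# Language aliases are normalized: "py" -> "Python", "go" -> "Go".
evaluator = NanoCodeSearchNetEvaluator(dataset_names=["py", "go"], batch_size=64, write_csv=False)

# With the default ndcg_only=True, results keep only *ndcg@10* keys, e.g.
# "NanoCodeSearchNetPython_cosine_ndcg@10" plus the aggregate
# "NanoCodeSearchNet_mean_cosine_ndcg@10".
results = evaluator(model)
for key, value in sorted(results.items()):
    print(f"{key}: {value:.4f}")

The equivalent CLI run would be: python nano_code_search_net_eval.py --model-path sentence-transformers/all-MiniLM-L6-v2 --langs py go --batch-size 64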