Inframat-x committed on
Commit
b8e175e
·
verified ·
1 Parent(s): 391b7dc

Update rag_eval_metrics.py

Browse files
Files changed (1) hide show
  1. rag_eval_metrics.py +35 -111
rag_eval_metrics.py CHANGED
@@ -3,30 +3,6 @@
3
  rag_eval_metrics.py
4
 
5
  Evaluate RAG retrieval quality by comparing app logs (JSONL) with a gold file (CSV).
6
-
7
- Inputs (CLI):
8
- --gold_csv Path to gold CSV.
9
- --logs_jsonl Path to app JSONL logs (rag_logs.jsonl).
10
- --k Cutoff for metrics (default: 8).
11
- --out_dir Output directory for metrics files (default: rag_artifacts).
12
-
13
- Outputs (written into out_dir):
14
- - metrics_per_question.csv
15
- - metrics_aggregate.json
16
-
17
- Gold CSV accepted schemas (case-insensitive headers):
18
- Minimal (doc-level):
19
- question, doc
20
- (multiple rows per question allowed)
21
- With page info (page-level optional):
22
- question, doc, page
23
- List-in-a-cell also supported:
24
- question, relevant_docs # semicolon/comma separated; page matching disabled in this column
25
-
26
- Notes:
27
- - Matching is case-insensitive on question and doc filename.
28
- - Page-level metrics only computed when GOLD includes a concrete page for that question.
29
- - Logs are produced by app.py and contain 'retrieval'->'hits' with 'doc' and 'page'.
30
  """
31
 
32
  import argparse
@@ -43,7 +19,6 @@ import numpy as np
43
  # ----------------------------- IO Helpers ----------------------------- #
44
 
45
  def read_logs(jsonl_path: Path) -> pd.DataFrame:
46
- """Read JSONL logs and return a DataFrame with columns: question, hits(list[dict])."""
47
  rows = []
48
  with open(jsonl_path, "r", encoding="utf-8") as f:
49
  for line in f:
@@ -57,13 +32,11 @@ def read_logs(jsonl_path: Path) -> pd.DataFrame:
57
  q = (((rec.get("inputs") or {}).get("question")) or "").strip()
58
  retr = (rec.get("retrieval") or {})
59
  hits = retr.get("hits", [])
60
- # Normalize fields we need
61
  norm_hits = []
62
  for h in hits or []:
63
  doc = (h.get("doc") or "").strip()
64
  page = str(h.get("page") or "").strip()
65
  try:
66
- # Try int page if it looks numeric
67
  page_int = int(page)
68
  except Exception:
69
  page_int = None
@@ -72,8 +45,6 @@ def read_logs(jsonl_path: Path) -> pd.DataFrame:
72
  df = pd.DataFrame(rows)
73
  if df.empty:
74
  return pd.DataFrame(columns=["question", "hits"])
75
- # Keep last occurrence per question (latest run), but also allow multiple – we aggregate by question
76
- # For stability, group and keep the last non-empty hit list.
77
  def _pick_last_non_empty(hit_lists: List[List[dict]]) -> List[dict]:
78
  for lst in reversed(hit_lists):
79
  if lst:
@@ -87,13 +58,8 @@ def read_logs(jsonl_path: Path) -> pd.DataFrame:
87
 
88
 
89
  def read_gold(csv_path: Path) -> pd.DataFrame:
90
- """Read gold CSV, normalize columns, and return rows with:
91
- question(cf), question_raw, doc (lowercased filename), page (optional, int or NaN).
92
- """
93
  df = pd.read_csv(csv_path)
94
- # Normalize headers
95
  cols = {c.lower().strip(): c for c in df.columns}
96
- # Find question column
97
  q_col = None
98
  for cand in ["question", "query", "q"]:
99
  if cand in cols:
@@ -102,7 +68,6 @@ def read_gold(csv_path: Path) -> pd.DataFrame:
102
  if q_col is None:
103
  raise ValueError("Gold CSV must contain a 'question' column (case-insensitive).")
104
 
105
- # Accept either (doc[, page]) rows or a 'relevant_docs' list column
106
  rel_list_col = None
107
  for cand in ["relevant_docs", "relevant", "docs"]:
108
  if cand in cols:
@@ -123,22 +88,17 @@ def read_gold(csv_path: Path) -> pd.DataFrame:
123
 
124
  rows = []
125
  if rel_list_col and doc_col is None:
126
- # Each row may contain a list of docs (comma/semicolon separated)
127
  for _, r in df.iterrows():
128
  q_raw = str(r[q_col]).strip()
129
  q_norm = q_raw.casefold().strip()
130
  rel_val = str(r[rel_list_col]) if pd.notna(r[rel_list_col]) else ""
131
  if not rel_val:
132
- # still create an empty row (no gold docs)
133
  rows.append({"question_raw": q_raw, "question": q_norm, "doc": None, "page": np.nan})
134
  continue
135
- # split by semicolon or comma
136
  parts = [p.strip() for p in re_split_sc(rel_val)]
137
- # one row per doc (page-level off for list column)
138
  for d in parts:
139
  rows.append({"question_raw": q_raw, "question": q_norm, "doc": filename_key(d), "page": np.nan})
140
  elif doc_col:
141
- # Standard long form: one doc (+/- page) per row
142
  for _, r in df.iterrows():
143
  q_raw = str(r[q_col]).strip()
144
  q_norm = q_raw.casefold().strip()
@@ -153,26 +113,20 @@ def read_gold(csv_path: Path) -> pd.DataFrame:
153
  raise ValueError("Gold CSV must contain either a 'doc' column or a 'relevant_docs' column.")
154
 
155
  gold = pd.DataFrame(rows)
156
- # drop fully empty doc rows (when no gold docs listed)
157
  gold["has_doc"] = gold["doc"].apply(lambda x: isinstance(x, str) and len(x) > 0)
158
  if gold["has_doc"].any():
159
  gold = gold[gold["has_doc"]].copy()
160
  gold.drop(columns=["has_doc"], inplace=True, errors="ignore")
161
- # Deduplicate
162
  gold = gold.drop_duplicates(subset=["question", "doc", "page"])
163
  return gold
164
 
165
 
166
  def filename_key(s: str) -> str:
167
- """Normalize document name to just the basename, lowercased."""
168
- s = (s or "").strip()
169
- s = s.replace("\\", "/")
170
- s = s.split("/")[-1]
171
  return s.casefold()
172
 
173
 
174
  def re_split_sc(s: str) -> List[str]:
175
- """Split on semicolons or commas."""
176
  import re
177
  return re.split(r"[;,]", s)
178
 
@@ -180,7 +134,6 @@ def re_split_sc(s: str) -> List[str]:
180
  # ----------------------------- Metric Core ----------------------------- #
181
 
182
  def dcg_at_k(relevances: List[int]) -> float:
183
- """Binary DCG with log2 discounts; ranks are 1-indexed in denominator."""
184
  dcg = 0.0
185
  for i, rel in enumerate(relevances, start=1):
186
  if rel > 0:
@@ -197,22 +150,11 @@ def ndcg_at_k(relevances: List[int]) -> float:
197
  return float(dcg / idcg)
198
 
199
 
200
- def compute_metrics_for_question(
201
- gold_docs: List[str],
202
- gold_pages: List[Optional[int]],
203
- hits: List[Dict[str, Any]],
204
- k: int
205
- ) -> Dict[str, Any]:
206
- """
207
- Returns per-question metrics at cutoff k for:
208
- - doc-level: match on doc only
209
- - page-level: match on (doc,page) where page is provided in GOLD
210
- """
211
  top = hits[:k] if hits else []
212
  pred_docs = [filename_key(h.get("doc", "")) for h in top]
213
  pred_pairs = [(filename_key(h.get("doc", "")), h.get("page", None)) for h in top]
214
 
215
- # --- DOC-LEVEL ---
216
  gold_doc_set = set([d for d in gold_docs if isinstance(d, str) and d])
217
  rel_bin_doc = [1 if d in gold_doc_set else 0 for d in pred_docs]
218
  hitk_doc = 1 if any(rel_bin_doc) else 0
@@ -220,7 +162,6 @@ def compute_metrics_for_question(
220
  rec_doc = (sum(rel_bin_doc) / max(1, len(gold_doc_set))) if gold_doc_set else 0.0
221
  ndcg_doc = ndcg_at_k(rel_bin_doc)
222
 
223
- # --- PAGE-LEVEL (only if at least one GOLD page specified) ---
224
  gold_pairs = set()
225
  for d, p in zip(gold_docs, gold_pages):
226
  if isinstance(d, str) and d and (p is not None) and (not (isinstance(p, float) and np.isnan(p))):
@@ -257,6 +198,12 @@ def compute_metrics_for_question(
257
 
258
  # ----------------------------- Orchestration ----------------------------- #
259
 
 
 
 
 
 
 
260
  def main():
261
  ap = argparse.ArgumentParser()
262
  ap.add_argument("--gold_csv", required=True, type=str)
@@ -272,28 +219,26 @@ def main():
272
  logs_path = Path(args.logs_jsonl)
273
 
274
  if not gold_path.exists():
275
- print(f"❌ gold.csv not found at {gold_path}", file=sys.stderr)
276
  sys.exit(0)
277
  if not logs_path.exists() or logs_path.stat().st_size == 0:
278
- print(f"❌ logs JSONL not found or empty at {logs_path}", file=sys.stderr)
279
  sys.exit(0)
280
 
281
- # Load data
282
  try:
283
  gold = read_gold(gold_path)
284
  except Exception as e:
285
- print(f"❌ Failed to read gold: {e}", file=sys.stderr)
286
  sys.exit(0)
287
  logs = read_logs(logs_path)
288
 
289
  if gold.empty:
290
- print("❌ Gold file contains no usable rows.", file=sys.stderr)
291
  sys.exit(0)
292
  if logs.empty:
293
- print("❌ Logs file contains no usable entries.", file=sys.stderr)
294
  sys.exit(0)
295
 
296
- # Build gold dict: question -> list of (doc, page)
297
  gdict: Dict[str, List[Tuple[str, Optional[int]]]] = {}
298
  for _, r in gold.iterrows():
299
  q = str(r["question"]).strip()
@@ -301,19 +246,14 @@ def main():
301
  p = r["page"] if "page" in r else np.nan
302
  gdict.setdefault(q, []).append((d, p))
303
 
304
- # Align on questions (casefolded)
305
  logs["q_norm"] = logs["question"].astype(str).str.casefold().str.strip()
306
  perq_rows = []
307
  not_in_logs, not_in_gold = [], []
308
 
309
  for q_norm, pairs in gdict.items():
310
- # Pairs is list of (doc, page)
311
- q_gold_variants = [q_norm] # already normalized
312
- # Find logs row with same normalized question
313
  row = logs[logs["q_norm"] == q_norm]
314
  if row.empty:
315
  not_in_logs.append(q_norm)
316
- # Still record a row with zeros/NaNs
317
  gdocs = [d for (d, _) in pairs]
318
  gpages = [p for (_, p) in pairs]
319
  metrics = {
@@ -323,35 +263,21 @@ def main():
323
  "n_gold_doc_pages": int(len([(d, p) for (d, p) in zip(gdocs, gpages) if isinstance(d, str) and d and pd.notna(p)])),
324
  "n_pred": 0
325
  }
326
- perq_rows.append({
327
- "question": q_norm,
328
- "covered_in_logs": 0,
329
- **metrics
330
- })
331
  continue
332
 
333
- # Use the last row (grouping ensured one row per question)
334
  hits = row.iloc[0]["hits"] or []
335
- # Prepare gold lists for metric function
336
  gdocs = [d for (d, _) in pairs]
337
  gpages = [p for (_, p) in pairs]
338
  metrics = compute_metrics_for_question(gdocs, gpages, hits, args.k)
 
339
 
340
- perq_rows.append({
341
- "question": q_norm,
342
- "covered_in_logs": 1,
343
- **metrics
344
- })
345
-
346
- # Detect questions present in logs but not in gold (for reporting)
347
  gold_qs = set(gdict.keys())
348
  for qn in logs["q_norm"].tolist():
349
  if qn not in gold_qs:
350
  not_in_gold.append(qn)
351
 
352
  perq = pd.DataFrame(perq_rows)
353
-
354
- # Aggregates over questions that are covered_in_logs == 1
355
  covered = perq[perq["covered_in_logs"] == 1].copy()
356
  agg = {
357
  "questions_total_gold": int(len(gdict)),
@@ -359,55 +285,53 @@ def main():
359
  "questions_missing_in_logs": int(len(not_in_logs)),
360
  "questions_in_logs_not_in_gold": int(len(set(not_in_gold))),
361
  "k": int(args.k),
362
- # DOC-level
363
  "mean_hit@k_doc": float(covered["hit@k_doc"].mean()) if not covered.empty else 0.0,
364
  "mean_precision@k_doc": float(covered["precision@k_doc"].mean()) if not covered.empty else 0.0,
365
  "mean_recall@k_doc": float(covered["recall@k_doc"].mean()) if not covered.empty else 0.0,
366
  "mean_ndcg@k_doc": float(covered["ndcg@k_doc"].mean()) if not covered.empty else 0.0,
367
- # PAGE-level (skip NaNs)
368
  "mean_hit@k_page": float(covered["hit@k_page"].dropna().mean()) if covered["hit@k_page"].notna().any() else None,
369
  "mean_precision@k_page": float(covered["precision@k_page"].dropna().mean()) if covered["precision@k_page"].notna().any() else None,
370
  "mean_recall@k_page": float(covered["recall@k_page"].dropna().mean()) if covered["recall@k_page"].notna().any() else None,
371
  "mean_ndcg@k_page": float(covered["ndcg@k_page"].dropna().mean()) if covered["ndcg@k_page"].notna().any() else None,
372
- # Distribution hints
373
  "avg_gold_docs_per_q": float(perq["n_gold_docs"].mean()) if not perq.empty else 0.0,
374
  "avg_preds_per_q": float(perq["n_pred"].mean()) if not perq.empty else 0.0,
375
- # Listings (truncated for readability)
376
  "examples_missing_in_logs": list(not_in_logs[:10]),
377
  "examples_in_logs_not_in_gold": list(dict.fromkeys(not_in_gold))[:10],
378
  }
379
 
380
- # Write outputs
381
  perq_path = out_dir / "metrics_per_question.csv"
382
  agg_path = out_dir / "metrics_aggregate.json"
383
  perq.to_csv(perq_path, index=False)
384
  with open(agg_path, "w", encoding="utf-8") as f:
385
  json.dump(agg, f, ensure_ascii=False, indent=2)
386
 
387
- # Console summary (stdout) for app display
388
- print("RAG Evaluation Summary")
389
- print("----------------------")
390
- print(f"Gold questions: {agg['questions_total_gold']}")
391
- print(f"Covered in logs: {agg['questions_covered_in_logs']}")
392
- print(f"Missing in logs: {agg['questions_missing_in_logs']}")
393
- print(f"In logs but not in gold: {agg['questions_in_logs_not_in_gold']}")
394
- print(f"k = {agg['k']}")
395
- print()
396
- print(f"Doc-level: Hit@k={_fmt(agg['mean_hit@k_doc'])} "
 
397
  f"Precision@k={_fmt(agg['mean_precision@k_doc'])} "
398
  f"Recall@k={_fmt(agg['mean_recall@k_doc'])} "
399
- f"nDCG@k={_fmt(agg['mean_ndcg@k_doc'])}")
400
- if agg["mean_hit@k_page"] is not None:
401
- print(f"Page-level: Hit@k={_fmt(agg['mean_hit@k_page'])} "
 
 
402
  f"Precision@k={_fmt(agg['mean_precision@k_page'])} "
403
  f"Recall@k={_fmt(agg['mean_recall@k_page'])} "
404
- f"nDCG@k={_fmt(agg['mean_ndcg@k_page'])}")
405
  else:
406
- print("Page-level: (no page labels in gold)")
407
 
408
  print()
409
- print(f"Wrote per-question CSV → {perq_path}")
410
- print(f"Wrote aggregate JSON → {agg_path}")
411
 
412
 
413
  def _fmt(x: Any) -> str:
 
3
  rag_eval_metrics.py
4
 
5
  Evaluate RAG retrieval quality by comparing app logs (JSONL) with a gold file (CSV).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  """
7
 
8
  import argparse
 
19
  # ----------------------------- IO Helpers ----------------------------- #
20
 
21
  def read_logs(jsonl_path: Path) -> pd.DataFrame:
 
22
  rows = []
23
  with open(jsonl_path, "r", encoding="utf-8") as f:
24
  for line in f:
 
32
  q = (((rec.get("inputs") or {}).get("question")) or "").strip()
33
  retr = (rec.get("retrieval") or {})
34
  hits = retr.get("hits", [])
 
35
  norm_hits = []
36
  for h in hits or []:
37
  doc = (h.get("doc") or "").strip()
38
  page = str(h.get("page") or "").strip()
39
  try:
 
40
  page_int = int(page)
41
  except Exception:
42
  page_int = None
 
45
  df = pd.DataFrame(rows)
46
  if df.empty:
47
  return pd.DataFrame(columns=["question", "hits"])
 
 
48
  def _pick_last_non_empty(hit_lists: List[List[dict]]) -> List[dict]:
49
  for lst in reversed(hit_lists):
50
  if lst:
 
58
 
59
 
60
  def read_gold(csv_path: Path) -> pd.DataFrame:
 
 
 
61
  df = pd.read_csv(csv_path)
 
62
  cols = {c.lower().strip(): c for c in df.columns}
 
63
  q_col = None
64
  for cand in ["question", "query", "q"]:
65
  if cand in cols:
 
68
  if q_col is None:
69
  raise ValueError("Gold CSV must contain a 'question' column (case-insensitive).")
70
 
 
71
  rel_list_col = None
72
  for cand in ["relevant_docs", "relevant", "docs"]:
73
  if cand in cols:
 
88
 
89
  rows = []
90
  if rel_list_col and doc_col is None:
 
91
  for _, r in df.iterrows():
92
  q_raw = str(r[q_col]).strip()
93
  q_norm = q_raw.casefold().strip()
94
  rel_val = str(r[rel_list_col]) if pd.notna(r[rel_list_col]) else ""
95
  if not rel_val:
 
96
  rows.append({"question_raw": q_raw, "question": q_norm, "doc": None, "page": np.nan})
97
  continue
 
98
  parts = [p.strip() for p in re_split_sc(rel_val)]
 
99
  for d in parts:
100
  rows.append({"question_raw": q_raw, "question": q_norm, "doc": filename_key(d), "page": np.nan})
101
  elif doc_col:
 
102
  for _, r in df.iterrows():
103
  q_raw = str(r[q_col]).strip()
104
  q_norm = q_raw.casefold().strip()
 
113
  raise ValueError("Gold CSV must contain either a 'doc' column or a 'relevant_docs' column.")
114
 
115
  gold = pd.DataFrame(rows)
 
116
  gold["has_doc"] = gold["doc"].apply(lambda x: isinstance(x, str) and len(x) > 0)
117
  if gold["has_doc"].any():
118
  gold = gold[gold["has_doc"]].copy()
119
  gold.drop(columns=["has_doc"], inplace=True, errors="ignore")
 
120
  gold = gold.drop_duplicates(subset=["question", "doc", "page"])
121
  return gold
122
 
123
 
124
  def filename_key(s: str) -> str:
125
+ s = (s or "").strip().replace("\\", "/").split("/")[-1]
 
 
 
126
  return s.casefold()
127
 
128
 
129
  def re_split_sc(s: str) -> List[str]:
 
130
  import re
131
  return re.split(r"[;,]", s)
132
 
 
134
  # ----------------------------- Metric Core ----------------------------- #
135
 
136
  def dcg_at_k(relevances: List[int]) -> float:
 
137
  dcg = 0.0
138
  for i, rel in enumerate(relevances, start=1):
139
  if rel > 0:
 
150
  return float(dcg / idcg)
151
 
152
 
153
+ def compute_metrics_for_question(gold_docs, gold_pages, hits, k):
 
 
 
 
 
 
 
 
 
 
154
  top = hits[:k] if hits else []
155
  pred_docs = [filename_key(h.get("doc", "")) for h in top]
156
  pred_pairs = [(filename_key(h.get("doc", "")), h.get("page", None)) for h in top]
157
 
 
158
  gold_doc_set = set([d for d in gold_docs if isinstance(d, str) and d])
159
  rel_bin_doc = [1 if d in gold_doc_set else 0 for d in pred_docs]
160
  hitk_doc = 1 if any(rel_bin_doc) else 0
 
162
  rec_doc = (sum(rel_bin_doc) / max(1, len(gold_doc_set))) if gold_doc_set else 0.0
163
  ndcg_doc = ndcg_at_k(rel_bin_doc)
164
 
 
165
  gold_pairs = set()
166
  for d, p in zip(gold_docs, gold_pages):
167
  if isinstance(d, str) and d and (p is not None) and (not (isinstance(p, float) and np.isnan(p))):
 
198
 
199
  # ----------------------------- Orchestration ----------------------------- #
200
 
201
+ # === Dark blue and accent colors ===
202
+ COLOR_TITLE = "\033[94m" # light blue for titles
203
+ COLOR_TEXT = "\033[34m" # dark blue
204
+ COLOR_ACCENT = "\033[36m" # cyan for metrics
205
+ COLOR_RESET = "\033[0m"
206
+
207
  def main():
208
  ap = argparse.ArgumentParser()
209
  ap.add_argument("--gold_csv", required=True, type=str)
 
219
  logs_path = Path(args.logs_jsonl)
220
 
221
  if not gold_path.exists():
222
+ print(f"{COLOR_TEXT}❌ gold.csv not found at {gold_path}{COLOR_RESET}", file=sys.stderr)
223
  sys.exit(0)
224
  if not logs_path.exists() or logs_path.stat().st_size == 0:
225
+ print(f"{COLOR_TEXT}❌ logs JSONL not found or empty at {logs_path}{COLOR_RESET}", file=sys.stderr)
226
  sys.exit(0)
227
 
 
228
  try:
229
  gold = read_gold(gold_path)
230
  except Exception as e:
231
+ print(f"{COLOR_TEXT}❌ Failed to read gold: {e}{COLOR_RESET}", file=sys.stderr)
232
  sys.exit(0)
233
  logs = read_logs(logs_path)
234
 
235
  if gold.empty:
236
+ print(f"{COLOR_TEXT}❌ Gold file contains no usable rows.{COLOR_RESET}", file=sys.stderr)
237
  sys.exit(0)
238
  if logs.empty:
239
+ print(f"{COLOR_TEXT}❌ Logs file contains no usable entries.{COLOR_RESET}", file=sys.stderr)
240
  sys.exit(0)
241
 
 
242
  gdict: Dict[str, List[Tuple[str, Optional[int]]]] = {}
243
  for _, r in gold.iterrows():
244
  q = str(r["question"]).strip()
 
246
  p = r["page"] if "page" in r else np.nan
247
  gdict.setdefault(q, []).append((d, p))
248
 
 
249
  logs["q_norm"] = logs["question"].astype(str).str.casefold().str.strip()
250
  perq_rows = []
251
  not_in_logs, not_in_gold = [], []
252
 
253
  for q_norm, pairs in gdict.items():
 
 
 
254
  row = logs[logs["q_norm"] == q_norm]
255
  if row.empty:
256
  not_in_logs.append(q_norm)
 
257
  gdocs = [d for (d, _) in pairs]
258
  gpages = [p for (_, p) in pairs]
259
  metrics = {
 
263
  "n_gold_doc_pages": int(len([(d, p) for (d, p) in zip(gdocs, gpages) if isinstance(d, str) and d and pd.notna(p)])),
264
  "n_pred": 0
265
  }
266
+ perq_rows.append({"question": q_norm, "covered_in_logs": 0, **metrics})
 
 
 
 
267
  continue
268
 
 
269
  hits = row.iloc[0]["hits"] or []
 
270
  gdocs = [d for (d, _) in pairs]
271
  gpages = [p for (_, p) in pairs]
272
  metrics = compute_metrics_for_question(gdocs, gpages, hits, args.k)
273
+ perq_rows.append({"question": q_norm, "covered_in_logs": 1, **metrics})
274
 
 
 
 
 
 
 
 
275
  gold_qs = set(gdict.keys())
276
  for qn in logs["q_norm"].tolist():
277
  if qn not in gold_qs:
278
  not_in_gold.append(qn)
279
 
280
  perq = pd.DataFrame(perq_rows)
 
 
281
  covered = perq[perq["covered_in_logs"] == 1].copy()
282
  agg = {
283
  "questions_total_gold": int(len(gdict)),
 
285
  "questions_missing_in_logs": int(len(not_in_logs)),
286
  "questions_in_logs_not_in_gold": int(len(set(not_in_gold))),
287
  "k": int(args.k),
 
288
  "mean_hit@k_doc": float(covered["hit@k_doc"].mean()) if not covered.empty else 0.0,
289
  "mean_precision@k_doc": float(covered["precision@k_doc"].mean()) if not covered.empty else 0.0,
290
  "mean_recall@k_doc": float(covered["recall@k_doc"].mean()) if not covered.empty else 0.0,
291
  "mean_ndcg@k_doc": float(covered["ndcg@k_doc"].mean()) if not covered.empty else 0.0,
 
292
  "mean_hit@k_page": float(covered["hit@k_page"].dropna().mean()) if covered["hit@k_page"].notna().any() else None,
293
  "mean_precision@k_page": float(covered["precision@k_page"].dropna().mean()) if covered["precision@k_page"].notna().any() else None,
294
  "mean_recall@k_page": float(covered["recall@k_page"].dropna().mean()) if covered["recall@k_page"].notna().any() else None,
295
  "mean_ndcg@k_page": float(covered["ndcg@k_page"].dropna().mean()) if covered["ndcg@k_page"].notna().any() else None,
 
296
  "avg_gold_docs_per_q": float(perq["n_gold_docs"].mean()) if not perq.empty else 0.0,
297
  "avg_preds_per_q": float(perq["n_pred"].mean()) if not perq.empty else 0.0,
 
298
  "examples_missing_in_logs": list(not_in_logs[:10]),
299
  "examples_in_logs_not_in_gold": list(dict.fromkeys(not_in_gold))[:10],
300
  }
301
 
 
302
  perq_path = out_dir / "metrics_per_question.csv"
303
  agg_path = out_dir / "metrics_aggregate.json"
304
  perq.to_csv(perq_path, index=False)
305
  with open(agg_path, "w", encoding="utf-8") as f:
306
  json.dump(agg, f, ensure_ascii=False, indent=2)
307
 
308
+ # === Console summary with color ===
309
+ print(f"{COLOR_TITLE}RAG Evaluation Summary{COLOR_RESET}")
310
+ print(f"{COLOR_TITLE}----------------------{COLOR_RESET}")
311
+ print(f"{COLOR_TEXT}Gold questions: {COLOR_ACCENT}{agg['questions_total_gold']}{COLOR_RESET}")
312
+ print(f"{COLOR_TEXT}Covered in logs: {COLOR_ACCENT}{agg['questions_covered_in_logs']}{COLOR_RESET}")
313
+ print(f"{COLOR_TEXT}Missing in logs: {COLOR_ACCENT}{agg['questions_missing_in_logs']}{COLOR_RESET}")
314
+ print(f"{COLOR_TEXT}In logs but not in gold: {COLOR_ACCENT}{agg['questions_in_logs_not_in_gold']}{COLOR_RESET}")
315
+ print(f"{COLOR_TEXT}k = {COLOR_ACCENT}{agg['k']}{COLOR_RESET}\n")
316
+
317
+ print(f"{COLOR_TEXT}Doc-level:{COLOR_RESET} "
318
+ f"{COLOR_ACCENT}Hit@k={_fmt(agg['mean_hit@k_doc'])} "
319
  f"Precision@k={_fmt(agg['mean_precision@k_doc'])} "
320
  f"Recall@k={_fmt(agg['mean_recall@k_doc'])} "
321
+ f"nDCG@k={_fmt(agg['mean_ndcg@k_doc'])}{COLOR_RESET}")
322
+
323
+ if agg['mean_hit@k_page'] is not None:
324
+ print(f"{COLOR_TEXT}Page-level:{COLOR_RESET} "
325
+ f"{COLOR_ACCENT}Hit@k={_fmt(agg['mean_hit@k_page'])} "
326
  f"Precision@k={_fmt(agg['mean_precision@k_page'])} "
327
  f"Recall@k={_fmt(agg['mean_recall@k_page'])} "
328
+ f"nDCG@k={_fmt(agg['mean_ndcg@k_page'])}{COLOR_RESET}")
329
  else:
330
+ print(f"{COLOR_TEXT}Page-level: (no page labels in gold){COLOR_RESET}")
331
 
332
  print()
333
+ print(f"{COLOR_TEXT}Wrote per-question CSV → {COLOR_ACCENT}{perq_path}{COLOR_RESET}")
334
+ print(f"{COLOR_TEXT}Wrote aggregate JSON → {COLOR_ACCENT}{agg_path}{COLOR_RESET}")
335
 
336
 
337
  def _fmt(x: Any) -> str: