| """BrainCore Memory Benchmark — main evaluation harness.""" |
|
|
import argparse
import importlib
import json
import time
from pathlib import Path
from typing import Any
|
|
from metrics import (
    aggregate,
    exact_match,
    semantic_placeholder_score,
    temporal_order_score,
    contradiction_resolution_score,
    latency_ms,
)
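

from typing import Protocol


# A minimal sketch of the duck-typed adapter contract this harness relies on.
# The method names and signatures are inferred from the calls made in
# evaluate_session() and from the `build()` factory that --adapter modules
# must expose; adapters do not need to subclass or even import this class.
class MemoryAdapter(Protocol):
    def ingest(self, memories: list[dict]) -> None:
        """Store one session's memory records before its queries run."""
        ...

    def retrieve(self, query_text: str, top_k: int = 1) -> list[dict]:
        """Return up to `top_k` memory dicts, each carrying a "text" field."""
        ...

    def storage_bytes(self) -> int:
        """Report the adapter's current storage footprint in bytes."""
        ...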
|
|
|
|
def load_dataset(path: str) -> list[dict]:
    """Read a JSONL dataset: one session object per non-empty line."""
    sessions = []
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                sessions.append(json.loads(line))
    return sessions
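
# One dataset line, illustrating only the fields this harness reads. The
# "memory_id" key on memory records is an assumption for illustration: the
# harness never looks past each memory's "text", but the temporal and
# contradiction metrics match retrieved memories against required_memory_ids
# and latest_memory_id, so memories and ids must be linkable in some way.
#
# {"session_id": "s1",
#  "memories": [{"memory_id": "m1", "text": "User lives in Lisbon."},
#               {"memory_id": "m2", "text": "User moved to Porto."}],
#  "queries": [{"query_id": "q1",
#               "query_text": "Where does the user live now?",
#               "query_type": "contradiction",
#               "expected_answer": "Porto",
#               "required_memory_ids": ["m1", "m2"],
#               "latest_memory_id": "m2"}]}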
|
|
|
|
def evaluate_session(session: dict, adapter: Any, top_k: int = 1) -> list[dict]:
    """Ingest memories, then run every query. Returns per-query result dicts."""
    adapter.ingest(session.get("memories", []))
    storage = adapter.storage_bytes()

    results = []
    for q in session.get("queries", []):
        t0 = time.perf_counter()
        retrieved = adapter.retrieve(q["query_text"], top_k=top_k)
        t1 = time.perf_counter()

        pred_text = " ".join(m.get("text", "") for m in retrieved)
        ref_text = q.get("expected_answer", "")

        em = exact_match(pred_text, ref_text)
        sem = semantic_placeholder_score(pred_text, ref_text)

        qtype = q.get("query_type", "retrieval")
        to_score = temporal_order_score(retrieved, q.get("required_memory_ids", []))

        # Contradiction resolution is only meaningful for contradiction
        # queries; every other query type trivially passes.
        if qtype == "contradiction":
            cr_score = contradiction_resolution_score(retrieved, q.get("latest_memory_id"))
        else:
            cr_score = 1.0

        results.append({
            "session_id": session["session_id"],
            "query_id": q["query_id"],
            "query_type": qtype,
            "exact_match": em,
            "semantic_placeholder_score": sem,
            "temporal_order_score": to_score,
            "contradiction_resolution_score": cr_score,
            "latency_ms": latency_ms(t0, t1),
            "storage_bytes": storage,
            "pred": pred_text,
            "ref": ref_text,
        })
    return results
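

# A minimal in-memory adapter satisfying the sketched contract above, purely
# for illustration; real adapters live in their own modules, expose build(),
# and are loaded via --adapter. The token-overlap ranking implies no claim
# about how any actual adapter retrieves memories.
class _NaiveListAdapter:
    def __init__(self) -> None:
        self._memories: list[dict] = []

    def ingest(self, memories: list[dict]) -> None:
        self._memories.extend(memories)

    def retrieve(self, query_text: str, top_k: int = 1) -> list[dict]:
        # Rank memories by naive token overlap with the query text.
        tokens = set(query_text.lower().split())
        scored = sorted(
            self._memories,
            key=lambda m: len(tokens & set(m.get("text", "").lower().split())),
            reverse=True,
        )
        return scored[:top_k]

    def storage_bytes(self) -> int:
        # Approximate footprint as the serialized size of stored records.
        return sum(len(json.dumps(m).encode("utf-8")) for m in self._memories)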
|
|
|
|
def main():
    parser = argparse.ArgumentParser(description="BrainCore Memory Benchmark Harness")
    parser.add_argument("--adapter", required=True,
                        help="Dotted path to adapter module (must expose `build() -> MemoryAdapter`)")
    parser.add_argument("--dataset", required=True, help="Path to JSONL dataset")
    parser.add_argument("--output", required=True, help="Path to write JSON results")
    parser.add_argument("--top_k", type=int, default=1, help="Number of memories to retrieve per query")
    args = parser.parse_args()

    # Import the adapter module by its dotted path and build the adapter.
    module = importlib.import_module(args.adapter)
    adapter = module.build()

    sessions = load_dataset(args.dataset)
    all_results = []
    for session in sessions:
        all_results.extend(evaluate_session(session, adapter, top_k=args.top_k))
|
|
    # aggregate() is assumed to return a flat {metric_name: value} mapping,
    # which the summary printout below iterates over.
    summary = aggregate(all_results)
|
|
    payload = {
        "config": {
            "adapter": args.adapter,
            "dataset": args.dataset,
            "top_k": args.top_k,
        },
        "summary": summary,
        "per_query": all_results,
    }

    Path(args.output).parent.mkdir(parents=True, exist_ok=True)
    with open(args.output, "w", encoding="utf-8") as f:
        json.dump(payload, f, indent=2, ensure_ascii=False)
|
|
| print("BrainCore Memory Benchmark — Summary") |
| print("=" * 40) |
| for k, v in summary.items(): |
| print(f" {k:40s}: {v}") |
| print(f"\nFull results written to {args.output}") |
|
|
|
|
if __name__ == "__main__":
    main()
|
|