| |
| """RAG retrieval: query → top-K grounded passages with reranking.""" |
|
|
| import json |
| import faiss |
| import pickle |
| import numpy as np |
| from sentence_transformers import SentenceTransformer, CrossEncoder |
| from pathlib import Path |
|
|
|
|
class AudreyRetriever:
    """Dense retrieval + cross-encoder reranking over a FAISS chunk index.

    Loads prebuilt artifacts from ``index_dir`` (a FAISS index, parallel
    chunk metadata, and a bilingual EN/ZH lexicon), embeds queries with a
    SentenceTransformer bi-encoder, and reranks the FAISS candidates with
    a CrossEncoder.
    """

    def __init__(
        self,
        index_dir: str = "./index",
        embedder_path: str = "./models/bge-m3",
        reranker_path: str = "./models/bge-reranker",
        top_k_retrieve: int = 20,
        top_k_rerank: int = 5,
    ):
        self.top_k_retrieve = top_k_retrieve
        self.top_k_rerank = top_k_rerank

        index_path = Path(index_dir)
        self.index = faiss.read_index(str(index_path / "chunks.faiss"))
        # NOTE(review): pickle.load executes arbitrary code on malicious
        # input — these files must be trusted build artifacts only.
        with open(index_path / "chunks_meta.pkl", "rb") as f:
            self.chunks = pickle.load(f)
        with open(index_path / "lexicon.pkl", "rb") as f:
            self.lexicon = pickle.load(f)

        self.embedder = SentenceTransformer(embedder_path)
        self.reranker = CrossEncoder(reranker_path)

        # Exact-match lookup maps. retrieve() scans the lexicon linearly
        # (substring match), so these exist for external callers.
        self.lexicon_en = {t["en"].lower(): t for t in self.lexicon}
        self.lexicon_zh = {t["zh"]: t for t in self.lexicon}

    def retrieve(self, query: str) -> dict:
        """Retrieve and rerank passages for a query.

        Returns a dict with:
          - "passages": list of (chunk, rerank_score) pairs, best first,
            at most ``top_k_rerank`` entries.
          - "lexicon_terms": up to 10 lexicon entries literally mentioned
            in the query (case-insensitive for English, exact for Chinese).

        BUG FIX: previously "passages" held bare chunk dicts, while
        format_context() and the __main__ demo unpack each passage as a
        (chunk, score) pair — an unpacking error at runtime. The rerank
        score is now kept alongside each chunk.
        """
        q_emb = self.embedder.encode(
            [query], normalize_embeddings=True
        ).astype(np.float32)

        scores, indices = self.index.search(q_emb, self.top_k_retrieve)
        # BUG FIX: FAISS pads missing results with index -1, which the old
        # guard (i < len) let through — negative indexing then silently
        # returned the *last* chunk. Require 0 <= i as well.
        candidates = [
            (self.chunks[i], float(scores[0][j]))
            for j, i in enumerate(indices[0])
            if 0 <= i < len(self.chunks)
        ]

        # Rerank the dense candidates with the cross-encoder; keep the
        # cross-encoder score (not the FAISS score) for the final ranking.
        if candidates:
            pairs = [(query, chunk["text"]) for chunk, _ in candidates]
            rerank_scores = self.reranker.predict(pairs)
            ranked = sorted(
                zip(candidates, rerank_scores),
                key=lambda x: x[1],
                reverse=True,
            )
            top_passages = [
                (cand[0], float(score))
                for cand, score in ranked[: self.top_k_rerank]
            ]
        else:
            top_passages = []

        # Surface lexicon entries mentioned verbatim in the query.
        query_lower = query.lower()
        relevant_terms = [
            term
            for term in self.lexicon
            if term["en"].lower() in query_lower or term["zh"] in query
        ]

        return {
            "passages": top_passages,
            "lexicon_terms": relevant_terms[:10],
        }

    def format_context(self, result: dict) -> str:
        """Format retrieval results as a context string for the LLM.

        Expects ``result`` in the shape produced by :meth:`retrieve`:
        passages as (chunk, score) pairs; the score is not rendered.
        """
        parts = []
        for i, (chunk, _score) in enumerate(result["passages"]):
            parts.append(
                f"[Source {i+1}: {chunk['date']} — {chunk['title']}]\n"
                f"{chunk['text']}"
            )

        if result["lexicon_terms"]:
            terms = ", ".join(
                f"{t['en']} = {t['zh']}" for t in result["lexicon_terms"]
            )
            parts.append(f"\n[Terminology: {terms}]")

        return "\n\n".join(parts)
|
|
|
|
if __name__ == "__main__":
    # Smoke-test the retriever against a few representative queries
    # (English, Chinese, and out-of-domain).
    retriever = AudreyRetriever()

    demo_queries = (
        "How did Taiwan handle COVID-19 mask distribution?",
        "什麼是數位民主?",
        "What is your P(Doom)?",
        "Tell me about vTaiwan",
    )

    for query in demo_queries:
        print(f"\n{'='*60}")
        print(f"Query: {query}")
        result = retriever.retrieve(query)
        passages = result["passages"]
        print(f"Top {len(passages)} passages:")
        for rank, (chunk, score) in enumerate(passages, start=1):
            print(f" {rank}. [{chunk['date']}] {chunk['title']}")
            print(f" {chunk['text'][:100]}...")
        matched_terms = result["lexicon_terms"]
        if matched_terms:
            print(f"Lexicon: {[t['en'] for t in matched_terms]}")
|
|