"""
Optimized RAG Implementation - All optimization techniques applied.
IMPROVED: Better keyword filtering that doesn't eliminate all results.
"""
import time
import numpy as np
from sentence_transformers import SentenceTransformer
import faiss
import sqlite3
import hashlib
from typing import List, Tuple, Optional, Dict, Any
import re
from collections import defaultdict
import psutil
import os
from config import (
    EMBEDDING_MODEL, FAISS_INDEX_PATH, DOCSTORE_PATH,
    EMBEDDING_CACHE_PATH, TOP_K_DYNAMIC,
    MAX_TOKENS, ENABLE_EMBEDDING_CACHE, ENABLE_QUERY_CACHE,
    USE_QUANTIZED_LLM, ENABLE_PRE_FILTER
)
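# NOTE: config.py is assumed to expose TOP_K_DYNAMIC as a mapping with
# "short"/"medium"/"long" keys (consumed by _get_dynamic_top_k below), e.g.
#   TOP_K_DYNAMIC = {"short": 3, "medium": 5, "long": 8}
# The keys are required by this module; the values shown are illustrative only.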
class OptimizedRAG:
"""
Optimized RAG implementation with:
1. Embedding caching
2. IMPROVED Pre-filtering (less aggressive)
3. Dynamic top-k
4. Prompt compression
5. Quantized inference
6. Async-ready design
"""
def __init__(self, metrics_tracker=None):
self.metrics_tracker = metrics_tracker
self.embedder = None
self.faiss_index = None
self.docstore_conn = None
self.cache_conn = None
        self.query_cache: Dict[str, Tuple[str, float]] = {}
        self.keyword_index: Dict[str, List[int]] = {}
        self._initialized = False
        self.process = psutil.Process(os.getpid())
def initialize(self):
"""Lazy initialization with warm-up."""
if self._initialized:
return
print("Initializing Optimized RAG...")
start_time = time.perf_counter()
# 1. Load embedding model (warm it up)
self.embedder = SentenceTransformer(EMBEDDING_MODEL)
# Warm up with a small batch
self.embedder.encode(["warmup"])
# 2. Load FAISS index
if FAISS_INDEX_PATH.exists():
self.faiss_index = faiss.read_index(str(FAISS_INDEX_PATH))
# 3. Connect to document stores
self.docstore_conn = sqlite3.connect(DOCSTORE_PATH)
self._init_docstore_indices()
# 4. Initialize embedding cache
if ENABLE_EMBEDDING_CACHE:
self.cache_conn = sqlite3.connect(EMBEDDING_CACHE_PATH)
self._init_cache_schema()
# 5. Load keyword filter (simple implementation)
self.keyword_index = self._build_keyword_index()
init_time = (time.perf_counter() - start_time) * 1000
memory_mb = self.process.memory_info().rss / 1024 / 1024
print(f"Optimized RAG initialized in {init_time:.2f}ms, Memory: {memory_mb:.2f}MB")
print(f"Built keyword index with {len(self.keyword_index)} unique words")
self._initialized = True
def _init_docstore_indices(self):
"""Create performance indices on document store."""
cursor = self.docstore_conn.cursor()
cursor.execute("CREATE INDEX IF NOT EXISTS idx_chunk_hash ON chunks(chunk_hash)")
cursor.execute("CREATE INDEX IF NOT EXISTS idx_doc_id ON chunks(doc_id)")
self.docstore_conn.commit()
def _init_cache_schema(self):
"""Initialize embedding cache schema."""
cursor = self.cache_conn.cursor()
cursor.execute("""
CREATE TABLE IF NOT EXISTS embedding_cache (
text_hash TEXT PRIMARY KEY,
embedding BLOB NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
access_count INTEGER DEFAULT 0
)
""")
cursor.execute("CREATE INDEX IF NOT EXISTS idx_created_at ON embedding_cache(created_at)")
self.cache_conn.commit()
def _build_keyword_index(self) -> Dict[str, List[int]]:
"""Build a simple keyword-to-chunk index for pre-filtering."""
cursor = self.docstore_conn.cursor()
cursor.execute("SELECT id, chunk_text FROM chunks")
chunks = cursor.fetchall()
keyword_index = defaultdict(list)
for chunk_id, text in chunks:
# Simple keyword extraction (in production, use better NLP)
words = set(re.findall(r'\b\w{3,}\b', text.lower()))
for word in words:
keyword_index[word].append(chunk_id)
return keyword_index
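    # Shape of the index built above (illustrative values):
    #   {"transformer": [1, 4, 9], "attention": [4, 7], ...}
    # Every lowercased word of three or more characters maps to the DB ids of
    # the chunks that contain it.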
def _get_cached_embedding(self, text: str) -> Optional[np.ndarray]:
"""Get embedding from cache if available."""
if not ENABLE_EMBEDDING_CACHE or not self.cache_conn:
return None
text_hash = hashlib.md5(text.encode()).hexdigest()
cursor = self.cache_conn.cursor()
cursor.execute(
"SELECT embedding FROM embedding_cache WHERE text_hash = ?",
(text_hash,)
)
result = cursor.fetchone()
if result:
# Update access count
cursor.execute(
"UPDATE embedding_cache SET access_count = access_count + 1 WHERE text_hash = ?",
(text_hash,)
)
self.cache_conn.commit()
# Deserialize embedding
embedding = np.frombuffer(result[0], dtype=np.float32)
return embedding
return None
def _cache_embedding(self, text: str, embedding: np.ndarray):
"""Cache an embedding."""
if not ENABLE_EMBEDDING_CACHE or not self.cache_conn:
return
text_hash = hashlib.md5(text.encode()).hexdigest()
embedding_blob = embedding.astype(np.float32).tobytes()
cursor = self.cache_conn.cursor()
cursor.execute(
"""INSERT OR REPLACE INTO embedding_cache
(text_hash, embedding, access_count) VALUES (?, ?, 1)""",
(text_hash, embedding_blob)
)
self.cache_conn.commit()
def _get_dynamic_top_k(self, question: str) -> int:
"""Determine top_k based on query complexity."""
words = len(question.split())
if words < 10:
return TOP_K_DYNAMIC["short"]
elif words < 30:
return TOP_K_DYNAMIC["medium"]
else:
return TOP_K_DYNAMIC["long"]
def _pre_filter_chunks(self, question: str, min_candidates: int = 3) -> Optional[List[int]]:
"""
IMPROVED pre-filtering - less aggressive, ensures minimum candidates.
Returns None if no filtering should be applied.
"""
if not ENABLE_PRE_FILTER:
return None
question_words = set(re.findall(r'\b\w{3,}\b', question.lower()))
if not question_words:
return None
# Find chunks containing any of the question words
candidate_chunks = set()
for word in question_words:
if word in self.keyword_index:
candidate_chunks.update(self.keyword_index[word])
if not candidate_chunks:
return None
        # Note: the union above already contains every chunk matched by any
        # single question word, so intersecting pairs of per-word posting
        # lists can never add new candidates. If the union is still too
        # small, the filter would be too restrictive -- disable it instead.
        if len(candidate_chunks) < min_candidates:
            return None
return list(candidate_chunks)
def _search_faiss_optimized(self, query_embedding: np.ndarray,
top_k: int,
filter_ids: Optional[List[int]] = None) -> List[int]:
"""
Optimized FAISS search with SIMPLIFIED pre-filtering.
Uses post-filtering instead of IDSelectorArray to avoid type issues.
"""
if self.faiss_index is None:
raise ValueError("FAISS index not loaded")
query_embedding = query_embedding.astype(np.float32).reshape(1, -1)
        # With filter IDs, over-fetch and post-filter down to top_k
        if filter_ids:
            filter_set = set(filter_ids)  # O(1) membership tests
            # Over-fetch from the whole index (capped at its size): the nearest
            # neighbours are not guaranteed to fall inside the filter set
            expanded_k = min(top_k * 3, self.faiss_index.ntotal)
            distances, indices = self.faiss_index.search(query_embedding, expanded_k)
            # Convert FAISS indices (0-based) to DB IDs (1-based)
            faiss_results = [int(idx + 1) for idx in indices[0] if idx >= 0]
            # Keep only pre-filtered IDs, preserving similarity rank order
            filtered_results = [idx for idx in faiss_results if idx in filter_set]
            return filtered_results[:top_k]
else:
# Regular search
distances, indices = self.faiss_index.search(query_embedding, top_k)
# Convert to Python list (1-based for DB)
return [int(idx + 1) for idx in indices[0] if idx >= 0]
def _compress_prompt(self, chunks: List[str], max_tokens: int = 500) -> List[str]:
"""
Compress/truncate chunks to fit within token limit.
Simple implementation - in production, use better summarization.
"""
if not chunks:
return []
compressed = []
total_length = 0
for chunk in chunks:
chunk_length = len(chunk.split())
if total_length + chunk_length <= max_tokens:
compressed.append(chunk)
total_length += chunk_length
else:
# Truncate last chunk to fit
remaining = max_tokens - total_length
if remaining > 50: # Only include if meaningful
words = chunk.split()[:remaining]
compressed.append(' '.join(words))
break
return compressed
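    # Worked example of the word budget above: with max_tokens=500 and chunks
    # of 300 and 250 words, the first chunk fits whole (300 <= 500) and the
    # second is truncated to the remaining 200 words (kept, since 200 > 50).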
def _generate_response_optimized(self, question: str, chunks: List[str]) -> str:
"""
Optimized response generation with simulated quantization benefits.
"""
# Compress prompt
compressed_chunks = self._compress_prompt(chunks, MAX_TOKENS)
# Simulate quantized model inference (faster)
if compressed_chunks:
# Simple template-based response
context = "\n\n".join(compressed_chunks[:3])
response = f"Based on the relevant information:\n\n{context[:300]}..."
# Add optimization notice
if len(compressed_chunks) < len(chunks):
response += f"\n\n[Optimization: Used {len(compressed_chunks)} of {len(chunks)} chunks after compression]"
else:
response = "I don't have enough relevant information to answer that question."
        # Simulate generation latency: quantized inference is modelled at
        # roughly 80ms versus 200ms for the full-precision path
        time.sleep(0.08 if USE_QUANTIZED_LLM else 0.2)
return response
def query(self, question: str, top_k: Optional[int] = None) -> Tuple[str, int]:
"""
Process a query using optimized RAG.
Returns:
Tuple of (answer, number of chunks used)
"""
if not self._initialized:
self.initialize()
start_time = time.perf_counter()
embedding_time = 0
retrieval_time = 0
generation_time = 0
filter_time = 0
# Check query cache
if ENABLE_QUERY_CACHE:
question_hash = hashlib.md5(question.encode()).hexdigest()
if question_hash in self.query_cache:
cached_answer, timestamp = self.query_cache[question_hash]
# Cache valid for 1 hour
if time.time() - timestamp < 3600:
print(f"[Optimized RAG] Cache hit for query")
return cached_answer, 0
# Step 1: Get embedding (with caching)
embedding_start = time.perf_counter()
cached_embedding = self._get_cached_embedding(question)
if cached_embedding is not None:
query_embedding = cached_embedding
cache_status = "HIT"
else:
query_embedding = self.embedder.encode([question])[0]
self._cache_embedding(question, query_embedding)
cache_status = "MISS"
embedding_time = (time.perf_counter() - embedding_start) * 1000
# Step 2: Pre-filter chunks (IMPROVED)
filter_start = time.perf_counter()
filter_ids = self._pre_filter_chunks(question)
filter_time = (time.perf_counter() - filter_start) * 1000
# Step 3: Determine dynamic top_k
dynamic_k = self._get_dynamic_top_k(question)
effective_k = top_k or dynamic_k
# Step 4: Search with optimizations
retrieval_start = time.perf_counter()
chunk_ids = self._search_faiss_optimized(query_embedding, effective_k, filter_ids)
retrieval_time = (time.perf_counter() - retrieval_start) * 1000
        # Step 5: Retrieve chunks, preserving the FAISS similarity ranking
        # (an ORDER BY id would scramble relevance order)
        if chunk_ids:
            cursor = self.docstore_conn.cursor()
            placeholders = ','.join('?' for _ in chunk_ids)
            sql = f"SELECT id, chunk_text FROM chunks WHERE id IN ({placeholders})"
            cursor.execute(sql, chunk_ids)
            rows = dict(cursor.fetchall())
            chunks = [rows[cid] for cid in chunk_ids if cid in rows]
        else:
            chunks = []
# Step 6: Generate optimized response
generation_start = time.perf_counter()
answer = self._generate_response_optimized(question, chunks)
generation_time = (time.perf_counter() - generation_start) * 1000
total_time = (time.perf_counter() - start_time) * 1000
# Cache the result
if ENABLE_QUERY_CACHE and chunks:
question_hash = hashlib.md5(question.encode()).hexdigest()
self.query_cache[question_hash] = (answer, time.time())
# Log metrics
if self.metrics_tracker:
current_memory = self.process.memory_info().rss / 1024 / 1024
self.metrics_tracker.record_query(
model="optimized",
latency_ms=total_time,
memory_mb=current_memory,
chunks_used=len(chunks),
question_length=len(question),
embedding_time=embedding_time,
retrieval_time=retrieval_time,
generation_time=generation_time
)
print(f"[Optimized RAG] Query: '{question[:50]}...'")
print(f" - Embedding: {embedding_time:.2f}ms ({cache_status})")
if filter_ids:
print(f" - Pre-filter: {filter_time:.2f}ms ({len(filter_ids)} candidates)")
print(f" - Retrieval: {retrieval_time:.2f}ms")
print(f" - Generation: {generation_time:.2f}ms")
print(f" - Total: {total_time:.2f}ms")
print(f" - Chunks used: {len(chunks)} (top_k={effective_k}, filtered={filter_ids is not None})")
return answer, len(chunks)
def get_cache_stats(self) -> Dict[str, Any]:
"""Get cache statistics."""
if not self.cache_conn:
return {}
cursor = self.cache_conn.cursor()
cursor.execute("SELECT COUNT(*) FROM embedding_cache")
total = cursor.fetchone()[0]
cursor.execute("SELECT SUM(access_count) FROM embedding_cache")
accesses = cursor.fetchone()[0] or 0
return {
"total_cached": total,
"total_accesses": accesses,
"avg_access_per_item": accesses / total if total > 0 else 0
}
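    # A cache-eviction sketch (an addition, not part of the original design):
    # the schema above indexes embedding_cache.created_at but never uses it,
    # so this shows one plausible way to reclaim space by age. The signature
    # and default are assumptions, not an established API.
    def prune_embedding_cache(self, max_age_days: int = 30) -> int:
        """Delete cached embeddings older than max_age_days; return rows removed."""
        if not self.cache_conn:
            return 0
        cursor = self.cache_conn.cursor()
        cursor.execute(
            "DELETE FROM embedding_cache WHERE created_at < datetime('now', ?)",
            (f"-{max_age_days} days",)
        )
        self.cache_conn.commit()
        return cursor.rowcount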
def close(self):
"""Clean up resources."""
if self.docstore_conn:
self.docstore_conn.close()
if self.cache_conn:
self.cache_conn.close()
self._initialized = False
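if __name__ == "__main__":
    # Minimal usage sketch. Assumes the FAISS index and SQLite docstore at the
    # paths in config.py have already been built by the project's ingestion
    # step (not shown in this file).
    rag = OptimizedRAG()
    try:
        answer, n_chunks = rag.query("What is retrieval-augmented generation?")
        print(f"\nAnswer ({n_chunks} chunks):\n{answer}")
        print(f"Cache stats: {rag.get_cache_stats()}")
    finally:
        rag.close()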