# ChatbotDemo/app/policy_vector_db.py
import os
import json
import logging
from typing import Dict, List, Optional

import chromadb
from chromadb.config import Settings
from sentence_transformers import SentenceTransformer

logger = logging.getLogger("app")
class PolicyVectorDB:
    """Thin wrapper around a persistent ChromaDB collection of policy chunks."""

    def __init__(self, persist_directory: str, top_k_default: int = 5, relevance_threshold: float = 0.5):
        self.persist_directory = persist_directory
        self.client = chromadb.PersistentClient(path=persist_directory, settings=Settings(allow_reset=True))
        self.collection_name = "neepco_dop_policies"
        # A small, fast embedding model keeps latency reasonable on CPU.
        self.embedding_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2', device='cpu')
        self.collection = None
        self.top_k_default = top_k_default
        self.relevance_threshold = relevance_threshold
    def _get_collection(self):
        # Lazily create (or fetch) the collection on first use.
        if self.collection is None:
            self.collection = self.client.get_or_create_collection(
                name=self.collection_name,
                metadata={"description": "NEEPCO Delegation of Powers Policy"}
            )
        return self.collection
    def _flatten_metadata(self, metadata: Dict) -> Dict:
        # ChromaDB only accepts primitive metadata values, so stringify everything.
        return {key: str(value) for key, value in metadata.items()}
    def add_chunks(self, chunks: List[Dict]):
        collection = self._get_collection()
        if not chunks:
            logger.info("No chunks provided to add.")
            return

        existing_ids = set()
        try:
            # Look up existing IDs so chunks that are already stored are not re-inserted.
            existing_ids = set(collection.get(ids=[str(c['id']) for c in chunks if c.get('id')])['ids'])
        except Exception:
            logger.warning("Could not retrieve existing IDs; proceeding with add and letting ChromaDB handle duplicates.")
            existing_ids = set()

        new_chunks = [chunk for chunk in chunks if chunk.get('id') and str(chunk.get('id')) not in existing_ids]
        if not new_chunks:
            logger.info("No new chunks to add to the database.")
            return

        logger.info(f"Adding {len(new_chunks)} new chunks to the vector database...")
        batch_size = 64  # A modest batch size keeps embedding memory use stable.
        for i in range(0, len(new_chunks), batch_size):
            batch = new_chunks[i:i + batch_size]
            texts = [chunk['text'] for chunk in batch]
            ids = [str(chunk['id']) for chunk in batch]
            metadatas = []
            for chunk in batch:
                meta = chunk.get('metadata')
                if not meta:  # Metadata missing or empty: fall back to a generic description.
                    meta = {"description": "General information chunk."}
                metadatas.append(self._flatten_metadata(meta))
            embeddings = self.embedding_model.encode(texts, show_progress_bar=False).tolist()
            collection.add(ids=ids, embeddings=embeddings, documents=texts, metadatas=metadatas)
            logger.info(f"Added batch {i//batch_size + 1}/{(len(new_chunks) + batch_size - 1) // batch_size}")
        logger.info(f"Finished adding {len(new_chunks)} chunks.")
    def search(self, query_text: str, top_k: Optional[int] = None) -> List[Dict]:
        collection = self._get_collection()
        query_embedding = self.embedding_model.encode([query_text]).tolist()
        top_k = top_k if top_k else self.top_k_default

        results = collection.query(
            query_embeddings=query_embedding,
            n_results=top_k,
            include=["documents", "metadatas", "distances"]
        )

        search_results = []
        if results and results['documents'] and results['documents'][0]:
            for i, doc in enumerate(results['documents'][0]):
                # Convert the distance to a similarity-style score; assumes smaller distance means more relevant.
                relevance_score = 1 - results['distances'][0][i]
                search_results.append({
                    'text': doc,
                    'metadata': results['metadatas'][0][i],
                    'relevance_score': relevance_score
                })
        return search_results
def ensure_db_populated(db_instance: PolicyVectorDB, chunks_file_path: str):
    try:
        if db_instance._get_collection().count() == 0:
            logger.info("Vector database is empty. Attempting to populate from chunks file.")
            if not os.path.exists(chunks_file_path):
                logger.error(f"Chunks file not found at {chunks_file_path}. Cannot populate DB.")
                return False
            # The chunks file is a standard .json document, so json.load is sufficient.
            with open(chunks_file_path, 'r', encoding='utf-8') as f:
                chunks_to_add = json.load(f)
            if not chunks_to_add:
                logger.warning(f"Chunks file at {chunks_file_path} is empty. No data to add to DB.")
                return False
            db_instance.add_chunks(chunks_to_add)
            logger.info("Vector database population attempt complete.")
            return True
        else:
            logger.info("Vector database already contains data. Skipping population.")
            return True
    except Exception as e:
        logger.error(f"DB Population Error: {e}", exc_info=True)
        return False
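

# Minimal usage sketch. The "vector_db" directory, "policy_chunks.json" path, and the
# example query below are hypothetical stand-ins, not part of this module's contract.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    db = PolicyVectorDB(persist_directory="vector_db", top_k_default=5)
    # Populate the collection on first run; later runs detect existing data and skip this step.
    if ensure_db_populated(db, "policy_chunks.json"):
        # Each result carries the chunk text, its flattened metadata, and a relevance score.
        for hit in db.search("Who can approve contract awards?", top_k=3):
            print(f"{hit['relevance_score']:.3f}  {hit['text'][:80]}")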