relevant update

service/rag_service.py  +35 −32  CHANGED
@@ -1,62 +1,63 @@
-import pickle
-from pathlib import Path
-
 from service.data_loader_service import CSVDataLoader
 from service.embedded_service import EmbeddingService
 from service.vector_store_service import VectorStoreService
 from service.llm_service import LLMService
+from pathlib import Path
+import pickle
 
-# -----------------------------
-#
-# -----------------------------
-CACHE_PATH = Path("embeddings.pkl")
-CSV_FILE = "final_data_set(in).csv"  # your QA CSV file
+# -------------------------------
+# Setup
+# -------------------------------
 
-
-
-#
+CACHE = Path("embeddings.pkl")
+
+# Embedder and LLM
 embedder = EmbeddingService()
 llm = LLMService()
 
 # Load documents
-loader = CSVDataLoader(CSV_FILE)
-documents = loader.load_qa_pairs()
+loader = CSVDataLoader("final_data_set(in).csv")
+documents = loader.load_qa_pairs()
 
 # Load or compute embeddings
-if CACHE_PATH.exists():
-    with CACHE_PATH.open("rb") as f:
+if CACHE.exists():
+    with CACHE.open("rb") as f:
         embeddings = pickle.load(f)
 else:
     embeddings = embedder.embed(documents)
-    with CACHE_PATH.open("wb") as f:
+    with CACHE.open("wb") as f:
         pickle.dump(embeddings, f)
 
 vector_store = VectorStoreService(embeddings, documents)
 
+# -------------------------------
+# Generate answer function
+# -------------------------------
 
-# -----------------------------
-# Generate Answer Function
-# -----------------------------
-def generate_answer(question: str, k: int = 3) -> str:
+def generate_answer(question: str, k: int = 3, min_similarity: float = 0.65) -> str:
     """
-
-
+    Generates answer using RAG (retrieval + LLM).
+    Only includes context with similarity above threshold.
+    Returns fallback if no relevant context.
     """
 
+    # Compute embedding for the question
     query_vec = embedder.embed([question])[0]
-    top_docs = vector_store.search(query_vec, k=k)
 
-    #
-    seen = set()
-    unique_docs = []
-    for doc in top_docs:
-        if doc not in seen:
-            seen.add(doc)
-            unique_docs.append(doc)
+    # Get top-k results with scores
+    results = vector_store.search_with_scores(query_vec, k=k)  # returns list of (doc, score)
+
+    # Filter by similarity threshold
+    top_docs = [doc for doc, score in results if score >= min_similarity]
 
-    context_text = "\n\n".join(unique_docs)
+    if not top_docs:
+        # No relevant context found
+        return "I’m sorry, I don’t have relevant information in my knowledge base for this query."
 
-    #
+    # Join context for the prompt
+    context_text = "\n\n".join(top_docs)
+
+    # Build prompt for TinyLlama
     prompt = f"""
 You are a helpful IT support assistant.
 
@@ -73,4 +74,6 @@ User question: {question}
 Answer:
 <|assistant|>
 """
+
+    # Generate answer
     return llm.generate(prompt)
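The new retrieval path depends on VectorStoreService.search_with_scores, which is not part of this diff; the only contract visible here is the inline comment saying it returns a list of (doc, score) pairs. As a reference point for review, here is a minimal sketch of what such a method could look like, assuming the store keeps the embeddings as a NumPy matrix and that the scores are cosine similarities. Both the internals and the NumPy dependency are assumptions; the repo's actual service/vector_store_service.py may differ.

import numpy as np

class VectorStoreService:
    """Sketch only; stands in for the real service/vector_store_service.py."""

    def __init__(self, embeddings, documents):
        # Normalize rows once so a plain dot product yields cosine similarity.
        matrix = np.asarray(embeddings, dtype=np.float32)
        self._matrix = matrix / np.linalg.norm(matrix, axis=1, keepdims=True)
        self._documents = documents

    def search_with_scores(self, query_vec, k=3):
        # Return the top-k (doc, cosine_similarity) pairs, best first.
        q = np.asarray(query_vec, dtype=np.float32)
        q = q / np.linalg.norm(q)
        scores = self._matrix @ q
        top_idx = np.argsort(scores)[::-1][:k]
        return [(self._documents[i], float(scores[i])) for i in top_idx]

With cosine scores bounded in [-1, 1], the new min_similarity=0.65 default reads as a fairly strict relevance bar.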
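A quick way to exercise both branches of the updated generate_answer; the questions are made up for illustration:

if __name__ == "__main__":
    # Likely above the threshold if a similar QA pair exists in the CSV.
    print(generate_answer("How do I reset my VPN password?"))

    # Off-topic questions should now return the fallback message instead
    # of stuffing low-relevance context into the prompt.
    print(generate_answer("What is the capital of France?"))

One caveat on the filter: score >= min_similarity assumes higher means more similar. If search_with_scores were ever switched to raw distances (where lower is better), the comparison would silently drop every relevant document.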
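A follow-up worth considering: embeddings.pkl is written once and never invalidated, so edits to final_data_set(in).csv will not be picked up until someone deletes the cache by hand. A small mtime check would make the cache self-refreshing; a possible sketch, reusing only names already present in this diff plus a local csv_path:

# Recompute when the CSV is newer than the cached embeddings.
csv_path = Path("final_data_set(in).csv")
if CACHE.exists() and CACHE.stat().st_mtime >= csv_path.stat().st_mtime:
    with CACHE.open("rb") as f:
        embeddings = pickle.load(f)
else:
    embeddings = embedder.embed(documents)
    with CACHE.open("wb") as f:
        pickle.dump(embeddings, f)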