Create qa.py
src/qa.py
ADDED
@@ -0,0 +1,44 @@
# src/qa.py
from sentence_transformers import SentenceTransformer
from vectorstore import search_faiss
from transformers import pipeline

# Load embedding model (for query encoding)
_query_model = SentenceTransformer("all-MiniLM-L6-v2")

# Load Hugging Face generative model (FLAN-T5-small is free & CPU-friendly)
_answer_model = pipeline("text2text-generation", model="google/flan-t5-small")


def retrieve_chunks(query: str, index, chunks: list, top_k: int = 3):
    """Encode the query and return the top_k matching chunks from the FAISS index."""
    query_embedding = _query_model.encode([query], convert_to_numpy=True)[0]
    return search_faiss(query_embedding, index, chunks, top_k)


def generate_answer(query: str, retrieved_chunks: list) -> str:
    """Generate a natural-language answer using the Hugging Face model."""
    if not retrieved_chunks:
        return "Sorry, I could not find relevant information in the document."

    # Combine retrieved chunks into a single context
    context = " ".join(retrieved_chunks)

    # ✅ Stronger prompt to guide the model
    prompt = f"""
You are a helpful assistant.
Read the context carefully and answer the question with clear, step-by-step instructions.
Do not just repeat the context; explain in your own words.

Context:
{context}

Question:
{query}

Answer (in numbered steps):
"""

    # Run the Hugging Face pipeline
    result = _answer_model(prompt, max_length=300, clean_up_tokenization_spaces=True)
    return result[0]["generated_text"].strip()
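For context, a minimal sketch of how these two helpers could be exercised end to end. The inline index construction, sample chunks, and query below are illustrative only: in the real app the index and chunk list come from the vectorstore module, and the sketch assumes search_faiss accepts a single query embedding, the FAISS index, the chunk list, and top_k, as the call inside retrieve_chunks implies.

# demo_qa.py — usage sketch, not part of this commit.
# Builds a tiny FAISS index inline so the example is self-contained; in the
# real app the index and chunks come from the vectorstore ingestion step.
import faiss
from sentence_transformers import SentenceTransformer

from qa import generate_answer, retrieve_chunks

# Hypothetical document chunks, just for demonstration.
chunks = [
    "To reset your password, open Settings and choose Security.",
    "On the login page, click 'Forgot password' to receive a reset email.",
    "Two-factor authentication can be enabled under Security settings.",
]

# Embed the chunks and index them (IndexFlatL2 = exact L2 search).
encoder = SentenceTransformer("all-MiniLM-L6-v2")
embeddings = encoder.encode(chunks, convert_to_numpy=True)
index = faiss.IndexFlatL2(embeddings.shape[1])
index.add(embeddings)

query = "How do I reset my password?"
top_chunks = retrieve_chunks(query, index, chunks, top_k=2)
print(generate_answer(query, top_chunks))

One thing to keep in mind with this design: FLAN-T5 models are trained on inputs of up to 512 tokens, so joining many retrieved chunks into one context with a large top_k may degrade or silently truncate what the model actually sees.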