Sandei committed on
Commit
b907dad
·
1 Parent(s): 2605038

hello fix

Browse files
Files changed (1) hide show
  1. service/rag_service.py +19 -12
service/rag_service.py CHANGED
@@ -39,22 +39,31 @@ vector_store = VectorStoreService(embeddings, documents)
39
  # -----------------------------
40
  def generate_answer(question: str, k: int = 3) -> str:
41
  """
42
- Generates a detailed answer for a question using:
43
- - RAG (top-k retrieved context from CSV)
44
- - TinyLlama LLM for natural language generation
45
  """
46
 
47
- # 1️⃣ Embed the question
48
  query_vec = embedder.embed([question])[0]
 
49
 
50
- # 2️⃣ Retrieve top-k context documents
51
- top_docs = vector_store.search(query_vec, k=k) # list of context strings
 
 
 
 
 
52
 
53
- # 3️⃣ Build a clear instructional prompt
54
- context_text = "\n\n".join(top_docs)
 
55
  prompt = f"""
56
- You are a helpful IT support assistant. Use the context below to provide a detailed, step-by-step troubleshooting guide.
57
- If the answer is not in the context, say "I don't know".
 
 
 
 
58
 
59
  Context:
60
  {context_text}
@@ -64,6 +73,4 @@ User question: {question}
64
  Answer:
65
  <|assistant|>
66
  """
67
-
68
- # 4️⃣ Generate response using LLM
69
  return llm.generate(prompt)
 
39
  # -----------------------------
40
  def generate_answer(question: str, k: int = 3) -> str:
41
  """
42
+ Generate an answer strictly from context.
43
+ If the answer is not present, return the context instead of making up a solution.
 
44
  """
45
 
 
46
  query_vec = embedder.embed([question])[0]
47
+ top_docs = vector_store.search(query_vec, k=k)
48
 
49
+ # Deduplicate context
50
+ seen = set()
51
+ unique_docs = []
52
+ for doc in top_docs:
53
+ if doc not in seen:
54
+ seen.add(doc)
55
+ unique_docs.append(doc)
56
 
57
+ context_text = "\n\n".join(unique_docs)
58
+
59
+ # Build clear, strict prompt
60
  prompt = f"""
61
+ You are a helpful IT support assistant.
62
+
63
+ - ONLY answer based on the context below.
64
+ - DO NOT hallucinate or invent new steps.
65
+ - If the answer is not explicitly present in the context, return the context itself.
66
+ - Keep answers concise and step-by-step if possible.
67
 
68
  Context:
69
  {context_text}
 
73
  Answer:
74
  <|assistant|>
75
  """
 
 
76
  return llm.generate(prompt)