Shubham170793 committed on
Commit
de6b3c5
·
verified ·
1 Parent(s): a5cb4a1

Update src/qa.py

Browse files
Files changed (1) hide show
  1. src/qa.py +27 -2
src/qa.py CHANGED
@@ -264,18 +264,41 @@ def retrieve_chunks(query: str, index, chunks: list, top_k: int = 7,
264
  # ==========================================================
265
  # 8️⃣ Answer Generation
266
  # ==========================================================
267
- def generate_answer(query: str, retrieved_chunks: list, reasoning_mode: bool = False):
 
 
 
 
 
 
 
268
  if not retrieved_chunks:
269
  return "Sorry, I couldn’t find relevant information in the document."
270
 
 
271
  try:
272
  chat_llm_local = get_chat_llm()
273
  except Exception:
274
  return "⚠️ GPT-4o not initialized. Check credentials or rebuild the Space."
275
 
 
276
  context = "\n".join(f"[Chunk {i+1}] {chunk.strip()}" for i, chunk in enumerate(retrieved_chunks))
277
- prompt = (REASONING_PROMPT if reasoning_mode else STRICT_PROMPT).format(context=context, query=query)
278
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
279
  messages = [
280
  {"role": "system", "content":
281
  "You are an expert enterprise documentation assistant. "
@@ -287,6 +310,7 @@ def generate_answer(query: str, retrieved_chunks: list, reasoning_mode: bool = F
287
  {"role": "user", "content": prompt},
288
  ]
289
 
 
290
  try:
291
  response = chat_llm_local.invoke(messages)
292
  return response.content.strip()
@@ -295,6 +319,7 @@ def generate_answer(query: str, retrieved_chunks: list, reasoning_mode: bool = F
295
  return "⚠️ Error: Could not generate an answer."
296
 
297
 
 
298
  # ==========================================================
299
  # 9️⃣ Generic Text Generation Helper
300
  # ==========================================================
 
264
  # ==========================================================
265
  # 8️⃣ Answer Generation
266
  # ==========================================================
267
+ # ==========================================================
268
+ # 8️⃣ Answer Generation (Lazy GPT-4o Initialization + Language-Aware)
269
+ # ==========================================================
270
+ def generate_answer(query: str, retrieved_chunks: list, reasoning_mode: bool = False, doc_lang: str = "en"):
271
+ """
272
+ Generates an answer using GPT-4o (SAP Gen AI Hub proxy).
273
+ Now supports Hindi or English response formatting automatically.
274
+ """
275
  if not retrieved_chunks:
276
  return "Sorry, I couldn’t find relevant information in the document."
277
 
278
+ # Try lazy initialization
279
  try:
280
  chat_llm_local = get_chat_llm()
281
  except Exception:
282
  return "⚠️ GPT-4o not initialized. Check credentials or rebuild the Space."
283
 
284
+ # Build context
285
  context = "\n".join(f"[Chunk {i+1}] {chunk.strip()}" for i, chunk in enumerate(retrieved_chunks))
 
286
 
287
+ # 🌐 Language-specific prompt logic
288
+ if doc_lang == "hi":
289
+ # Hindi-language response
290
+ prompt = (
291
+ f"आप एक दस्तावेज़ सहायक हैं जो दिए गए अंशों के आधार पर सटीक उत्तर देता है। "
292
+ f"कृपया नीचे दिए गए संदर्भ का उपयोग करते हुए प्रश्न का उत्तर हिंदी में दें। "
293
+ f"यदि उत्तर स्पष्ट रूप से दस्तावेज़ में नहीं है, तो कहें — "
294
+ f"'मुझे इस दस्तावेज़ के आधार पर उत्तर ज्ञात नहीं है।'\n\n"
295
+ f"संदर्भ:\n{context}\n\nप्रश्न: {query}\nउत्तर:"
296
+ )
297
+ else:
298
+ # Default English prompts
299
+ prompt = (REASONING_PROMPT if reasoning_mode else STRICT_PROMPT).format(context=context, query=query)
300
+
301
+ # System role
302
  messages = [
303
  {"role": "system", "content":
304
  "You are an expert enterprise documentation assistant. "
 
310
  {"role": "user", "content": prompt},
311
  ]
312
 
313
+ # Generate answer
314
  try:
315
  response = chat_llm_local.invoke(messages)
316
  return response.content.strip()
 
319
  return "⚠️ Error: Could not generate an answer."
320
 
321
 
322
+
323
  # ==========================================================
324
  # 9️⃣ Generic Text Generation Helper
325
  # ==========================================================