Shubham170793 committed on
Commit
36665e6
·
verified ·
1 Parent(s): 24deec1

Update src/qa.py

Browse files
Files changed (1) hide show
  1. src/qa.py +13 -4
src/qa.py CHANGED
@@ -3,7 +3,8 @@
3
  # ----------------------------
4
  import os
5
 
6
- CACHE_DIR = "/home/user/huggingface"
 
7
  os.makedirs(CACHE_DIR, exist_ok=True)
8
 
9
  os.environ["HF_HOME"] = CACHE_DIR
@@ -11,6 +12,8 @@ os.environ["TRANSFORMERS_CACHE"] = CACHE_DIR
11
  os.environ["HF_DATASETS_CACHE"] = CACHE_DIR
12
  os.environ["HF_MODULES_CACHE"] = CACHE_DIR
13
 
 
 
14
  # ----------------------------
15
  # Imports AFTER cache bootstrap
16
  # ----------------------------
@@ -18,8 +21,6 @@ from sentence_transformers import SentenceTransformer
18
  from transformers import pipeline
19
  from vectorstore import search_faiss
20
 
21
- print("✅ qa.py loaded from:", __file__)
22
-
23
  # ----------------------------
24
  # Query embedding model
25
  # ----------------------------
@@ -48,4 +49,12 @@ def retrieve_chunks(query, index, chunks, top_k=3):
48
 
49
  def generate_answer(query, retrieved_chunks):
50
  if not retrieved_chunks:
51
- return "Sorry,
 
 
 
 
 
 
 
 
 
3
  # ----------------------------
4
  import os
5
 
6
# ---- Hugging Face cache bootstrap (must run before any transformers import) ----
# Per-user cache directory keyed on the POSIX uid, so concurrent users on the
# same host never collide on cache files.  NOTE(review): os.getuid() is
# POSIX-only — confirm this never runs on Windows.
current_uid = os.getuid()
CACHE_DIR = "/home/user-{}/huggingface".format(current_uid)

# Ensure the directory exists up front; exist_ok avoids a race with reruns.
os.makedirs(CACHE_DIR, exist_ok=True)

# Point every Hugging Face cache knob at the same per-user location.
os.environ["HF_HOME"] = CACHE_DIR
os.environ["HF_DATASETS_CACHE"] = CACHE_DIR
os.environ["HF_MODULES_CACHE"] = CACHE_DIR

print(f"✅ qa.py using Hugging Face cache at {CACHE_DIR}")
16
+
17
  # ----------------------------
18
  # Imports AFTER cache bootstrap
19
  # ----------------------------
 
21
  from transformers import pipeline
22
  from vectorstore import search_faiss
23
 
 
 
24
  # ----------------------------
25
  # Query embedding model
26
  # ----------------------------
 
49
 
def generate_answer(query, retrieved_chunks):
    """Answer *query* from the retrieved context chunks.

    Returns a fixed apology string when no chunks were retrieved; otherwise
    joins the chunks into a single context, wraps them in an instruction
    prompt, and runs the module-level ``_answer_model`` pipeline greedily.
    """
    if retrieved_chunks:
        joined_context = " ".join(retrieved_chunks)
        prompt_text = (
            "You are an assistant. Use the context to answer the question clearly.\n"
            f"Context:\n{joined_context}\n\nQuestion:\n{query}\n\nAnswer:"
        )
        # do_sample=False -> greedy decoding, keeps answers deterministic.
        outputs = _answer_model(prompt_text, max_length=300, do_sample=False)
        return outputs[0]["generated_text"].strip()
    return "Sorry, I could not find relevant information."