Shubham170793 committed
Commit d5f56bf · verified · 1 Parent(s): 8db2f50

Update src/qa.py

Files changed (1): src/qa.py (+55 / -85)

src/qa.py CHANGED
@@ -1,29 +1,23 @@
  """
- qa.py — Phi-2 FAST + ReRank (with FULL Reasoning Mode)
- -------------------------------------------------------
- ✅ Semantic retrieval (FAISS + cosine re-rank + neighbor-fill)
- ✅ Smart factual mode
+ qa.py — GPT-4o (SAP Gen AI Hub) + ReRank Retrieval
+ --------------------------------------------------
+ ✅ Semantic retrieval (FAISS + cosine re-rank + neighbor fill)
+ ✅ Smart factual mode (fast)
  ✅ Deep reasoning mode (ChatGPT-like)
  """

  import os
+ import json
  import numpy as np
  from sentence_transformers import SentenceTransformer
  from sklearn.metrics.pairwise import cosine_similarity
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
- import torch
-
- print("✅ qa.py (Phi-2 FAST + ReRank + Full Reasoning) loaded from:", __file__)
-
- api_key = os.getenv("OPENAI_API_KEY")
- if not api_key:
-     print("❌ OPENAI_API_KEY not found in environment!")
- else:
-     print("✅ OPENAI_API_KEY loaded successfully (length:", len(api_key), ")")
+ from gen_ai_hub.proxy.core.proxy_clients import get_proxy_client
+ from gen_ai_hub.proxy.langchain.openai import ChatOpenAI

+ print("✅ qa.py (GPT-4o via Gen AI Hub + ReRank) loaded from:", __file__)

  # ==========================================================
- # 1️⃣ Cache Setup
+ # 1️⃣ Hugging Face Cache
  # ==========================================================
  CACHE_DIR = "/tmp/hf_cache"
  os.makedirs(CACHE_DIR, exist_ok=True)
@@ -35,7 +29,7 @@ os.environ.update({
  })

  # ==========================================================
- # 2️⃣ Embedding Model
+ # 2️⃣ Embedding Model (E5-small-v2)
  # ==========================================================
  try:
      _query_model = SentenceTransformer("intfloat/e5-small-v2", cache_folder=CACHE_DIR)
@@ -45,15 +39,9 @@ except Exception as e:
      _query_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2", cache_folder=CACHE_DIR)

  # ==========================================================
- # 3️⃣ GPT-4o Model Setup (SAP Gen AI Hub)
+ # 3️⃣ GPT-4o via SAP Gen AI Hub
  # ==========================================================
- import json, os
- from gen_ai_hub.proxy.core.proxy_clients import get_proxy_client
- from gen_ai_hub.proxy.langchain.openai import ChatOpenAI
-
  print("✅ Loading GPT-4o via SAP Gen AI Hub...")
-
- # Load JSON credentials
  CRED_PATH = os.path.join(os.path.dirname(__file__), "irpa-r1208-hands-on-exercises-sk.json")

  try:
@@ -75,40 +63,37 @@ try:
          temperature=0.3,
          max_tokens=800
      )
-
      print("✅ GPT-4o (via Gen AI Hub) ready for generation.")
-
  except Exception as e:
      print(f"⚠️ Gen AI Hub setup failed: {e}")
      chat_llm = None

-
  # ==========================================================
- # 4️⃣ Prompts
+ # 4️⃣ Prompt Templates
  # ==========================================================
  STRICT_PROMPT = (
      "You are an enterprise documentation assistant.\n"
-     "Use ONLY the CONTEXT below to answer the QUESTION clearly and factually.\n"
-     "If the answer isn’t in the document, reply exactly:\n"
+     "Answer clearly and factually using ONLY the CONTEXT below.\n"
+     "If the answer is not in the document, reply exactly:\n"
      "'I don't know based on the provided document.'\n\n"
      "Context:\n{context}\n\nQuestion: {query}\nAnswer:"
  )

  REASONING_PROMPT = (
-     "You are an expert enterprise assistant capable of deep reasoning.\n"
-     "Think step by step before answering. Use the CONTEXT below first, but also apply your world knowledge logically.\n"
-     "Explain your reasoning concisely if it helps clarity.\n"
-     "Avoid hallucination — if the document does not include the answer, say:\n"
+     "You are an expert enterprise assistant capable of reasoning.\n"
+     "Think step by step. Base your answer primarily on the CONTEXT, "
+     "but apply logical inference only when necessary.\n"
+     "If the document lacks the answer, say exactly:\n"
      "'I don't know based on the provided document.'\n\n"
-     "Context:\n{context}\n\nQuestion: {query}\nLet's reason this out carefully:\nAnswer:"
+     "Context:\n{context}\n\nQuestion: {query}\nLet's reason step-by-step:\nAnswer:"
  )

  # ==========================================================
- # 5️⃣ Retrieval — FAISS + Re-rank + Neighbor Fill
+ # 5️⃣ Retrieval — FAISS + Cosine Re-Rank + Neighbor Fill
  # ==========================================================
  def retrieve_chunks(query: str, index, chunks: list, top_k: int = 5,
                      min_similarity: float = 0.6, candidate_multiplier: int = 3):
-     """Re-rank and optionally fill with neighbors for context continuity."""
+     """Select top chunks via FAISS, rerank by cosine similarity, fill gaps with neighbors."""
      if not index or not chunks:
          return []

@@ -117,11 +102,11 @@ def retrieve_chunks(query: str, index, chunks: list, top_k: int = 5,
              [f"query: {query.strip()}"], convert_to_numpy=True, normalize_embeddings=True
          )[0]

-         # Initial FAISS search
+         # 1️⃣ Initial FAISS search
          distances, indices = index.search(np.array([q_emb]).astype("float32"), top_k * candidate_multiplier)
-         candidate_indices = list(dict.fromkeys(indices[0]))  # dedup
+         candidate_indices = list(dict.fromkeys(indices[0]))  # dedup, preserve order

-         # Re-rank by cosine similarity
+         # 2️⃣ Compute true cosine similarity for rerank
          doc_embs = _query_model.encode(
              [f"passage: {chunks[i]}" for i in candidate_indices],
              convert_to_numpy=True,
@@ -130,82 +115,67 @@ def retrieve_chunks(query: str, index, chunks: list, top_k: int = 5,
          sims = cosine_similarity([q_emb], doc_embs)[0]
          ranked = sorted(zip(candidate_indices, sims), key=lambda x: x[1], reverse=True)

-         # Filter by min_similarity
-         filtered = [idx for idx, sim in ranked if sim >= min_similarity]
-         if len(filtered) > top_k:
-             filtered = filtered[:top_k]
+         # 3️⃣ Keep only chunks meeting threshold
+         filtered = [idx for idx, sim in ranked if sim >= min_similarity][:top_k]

-         # Neighbor fill if needed
+         # 4️⃣ Neighbor fill if not enough
          if len(filtered) < top_k:
              expanded = set(filtered)
              for idx in filtered:
-                 for neighbor in [idx - 1, idx + 1]:
-                     if 0 <= neighbor < len(chunks):
-                         expanded.add(neighbor)
+                 for nb in [idx - 1, idx + 1]:
+                     if 0 <= nb < len(chunks):
+                         expanded.add(nb)
                      if len(expanded) >= top_k:
                          break
                  if len(expanded) >= top_k:
                      break
              filtered = sorted(expanded)[:top_k]

-         return [chunks[i] for i in filtered]
+         final_chunks = [chunks[i] for i in filtered]
+         print(f"✅ Retrieved {len(final_chunks)} chunks (semantic + neighbor fill)")
+         return final_chunks

      except Exception as e:
          print(f"⚠️ Retrieval error: {e}")
          return []

  # ==========================================================
- # 6️⃣ Answer Generation (GPT-4o with Full Reasoning)
+ # 6️⃣ Answer Generation — GPT-4o via Gen AI Hub
  # ==========================================================
- from openai import OpenAI
-
- client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
- MODEL_NAME = "gpt-4o"
-
  def generate_answer(query: str, retrieved_chunks: list, reasoning_mode: bool = False):
      """
-     Generates answers using GPT-4o.
-     - reasoning_mode=False → strict factual mode (fast)
-     - reasoning_mode=True → reasoning-rich mode (longer, more explanatory)
+     reasoning_mode=False → strict factual mode (fast)
+     reasoning_mode=True → deep reasoning mode (ChatGPT-like)
      """
      if not retrieved_chunks:
          return "Sorry, I couldn’t find relevant information in the document."
+     if chat_llm is None:
+         return "⚠️ GPT-4o not initialized. Check credentials or rebuild the Space."

-     # Format context with chunk tags
+     # Combine chunks with markers
      context = "\n".join(f"[Chunk {i+1}] {chunk.strip()}" for i, chunk in enumerate(retrieved_chunks))
-     prompt = (REASONING_PROMPT if reasoning_mode else STRICT_PROMPT).format(
-         context=context, query=query
-     )
+     prompt = (REASONING_PROMPT if reasoning_mode else STRICT_PROMPT).format(context=context, query=query)
+
+     messages = [
+         {
+             "role": "system",
+             "content": (
+                 "You are an expert enterprise documentation assistant. "
+                 "Answer only using provided context; if reasoning_mode is on, explain briefly. "
+                 "If answer not in document, say exactly: "
+                 "'I don't know based on the provided document.'"
+             ),
+         },
+         {"role": "user", "content": prompt},
+     ]

      try:
-         response = client.chat.completions.create(
-             model=MODEL_NAME,
-             messages=[
-                 {
-                     "role": "system",
-                     "content": (
-                         "You are an expert enterprise documentation assistant. "
-                         "Answer questions precisely using the provided context. "
-                         "If reasoning_mode is enabled, provide deeper explanations and step-by-step logic. "
-                         "If the document lacks information, respond exactly: "
-                         "'I don't know based on the provided document.'"
-                     ),
-                 },
-                 {"role": "user", "content": prompt},
-             ],
-             temperature=0.6 if reasoning_mode else 0.2,
-             max_tokens=600 if reasoning_mode else 350,
-             top_p=0.95,
-         )
-
-         text = response.choices[0].message.content.strip()
-         return text
-
+         response = chat_llm.invoke(messages)
+         return response.content.strip()
      except Exception as e:
          print(f"⚠️ GPT-4o generation failed: {e}")
          return "⚠️ Error: Could not generate an answer."

-
  # ==========================================================
  # 7️⃣ Local Test
  # ==========================================================
 
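The body of the Local Test section is truncated in this view. Purely as an illustration (not the author's actual test code), a minimal local smoke test could look like the sketch below: the sample chunk texts and the FAISS IndexFlatIP setup are assumptions, while _query_model, retrieve_chunks, and generate_answer come from qa.py itself. If Gen AI Hub credentials are missing, generate_answer() simply returns its fallback warning, so the script still runs end to end.

# Hypothetical local smoke test (illustration only; not part of this commit).
# Passages are embedded with the same E5 "passage:" prefix that retrieve_chunks() uses.
if __name__ == "__main__":
    import faiss

    chunks = [
        "SAP Gen AI Hub exposes GPT-4o behind enterprise credentials.",
        "retrieve_chunks() reranks FAISS candidates by cosine similarity.",
        "generate_answer() switches between strict and reasoning prompts.",
    ]
    embs = _query_model.encode(
        [f"passage: {c}" for c in chunks],
        convert_to_numpy=True,
        normalize_embeddings=True,
    )
    index = faiss.IndexFlatIP(embs.shape[1])  # inner product equals cosine on normalized vectors
    index.add(embs.astype("float32"))

    # Keep top_k * candidate_multiplier <= len(chunks) for this tiny corpus.
    hits = retrieve_chunks("How are answers generated?", index, chunks, top_k=1)
    print(generate_answer("How are answers generated?", hits, reasoning_mode=False))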