"""
qa.py — GPT-4o (SAP Gen AI Hub) + ReRank Retrieval
--------------------------------------------------
✅ Semantic retrieval (FAISS + cosine re-rank + neighbor fill)
✅ Bullet-aware similarity boost for procedural chunks
✅ Embedding caching (per PDF + chunk config aware)
✅ Smart factual mode (fast)
✅ Deep reasoning mode (ChatGPT-like)
✅ genai_generate() helper for suggestions
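
Typical usage (a minimal sketch; "doc.pdf" and my_chunks are placeholders
for your own document path and chunking step, everything else is defined
in this module):

    from vectorstore import build_faiss_index
    embs = cache_embeddings("doc.pdf", my_chunks, embed_chunks,
                            chunk_size=1000, overlap=100)
    index = build_faiss_index(embs)
    hits = retrieve_chunks("What are the prerequisites?", index, my_chunks,
                           embeddings=embs)
    print(generate_answer("What are the prerequisites?", hits))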
"""
import os
import re
import json
import pickle
import hashlib
import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
from gen_ai_hub.proxy.core.proxy_clients import get_proxy_client
from gen_ai_hub.proxy.langchain.openai import ChatOpenAI
print("✅ qa.py (GPT-4o via Gen AI Hub + Bullet-Aware Retrieval + Cache) loaded from:", __file__)
# ==========================================================
# 1️⃣ Hugging Face Cache Setup
# ==========================================================
CACHE_DIR = "/tmp/hf_cache"
os.makedirs(CACHE_DIR, exist_ok=True)
os.environ.update({
"HF_HOME": CACHE_DIR,
"TRANSFORMERS_CACHE": CACHE_DIR,
"HF_DATASETS_CACHE": CACHE_DIR,
"HF_MODULES_CACHE": CACHE_DIR
})
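# NOTE: newer transformers releases deprecate TRANSFORMERS_CACHE in favor of
# HF_HOME; both are set so old and new versions resolve to the same cache dir.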
# ==========================================================
# 2️⃣ Embedding Model (E5-small-v2)
# ==========================================================
try:
_query_model = SentenceTransformer(
"intfloat/e5-small-v2", # ⚡ Faster, 384-dim embeddings
cache_folder=CACHE_DIR
)
print("✅ Loaded embedding model: intfloat/e5-small-v2 (fast mode)")
except Exception as e:
print(f"⚠️ Embedding load failed ({e}), using MiniLM fallback")
_query_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2", cache_folder=CACHE_DIR)
# ==========================================================
# 3️⃣ GPT-4o via SAP Gen AI Hub
# ==========================================================
print("✅ Loading GPT-4o via SAP Gen AI Hub...")
CRED_PATH = os.path.join(os.path.dirname(__file__), "GEN AI HUB PROXY.json")
try:
with open(CRED_PATH, "r") as key_file:
svcKey = json.load(key_file)
os.environ.update({
"AICORE_AUTH_URL": svcKey["url"],
"AICORE_CLIENT_ID": svcKey["clientid"],
"AICORE_CLIENT_SECRET": svcKey["clientsecret"],
"AICORE_RESOURCE_GROUP": "default",
"AICORE_BASE_URL": svcKey["serviceurls"]["AI_API_URL"]
})
proxy_client = get_proxy_client("gen-ai-hub")
chat_llm = ChatOpenAI(
proxy_model_name="gpt-4o",
proxy_client=proxy_client,
temperature=0.3,
max_tokens=1500
)
print("✅ GPT-4o (via Gen AI Hub) ready for generation.")
except Exception as e:
print(f"⚠️ Gen AI Hub setup failed: {e}")
chat_llm = None
# ==========================================================
# 4️⃣ Embedding Generator (batch-optimized)
# ==========================================================
def embed_chunks(chunks, batch_size: int = 32):
"""
Batch-encode text chunks using the global embedding model.
Normalized 384-dim embeddings for FAISS retrieval.
"""
if not chunks:
return np.array([])
all_embeddings = []
for i in range(0, len(chunks), batch_size):
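        # E5 models are trained with "query:"/"passage:" text prefixes;
        # dropping them noticeably degrades retrieval quality.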
batch = [f"passage: {c}" for c in chunks[i:i + batch_size]]
batch_embs = _query_model.encode(
batch,
convert_to_numpy=True,
normalize_embeddings=True,
show_progress_bar=False
)
all_embeddings.extend(batch_embs)
print(f"⚡ Embedded {len(all_embeddings)} chunks in batches of {batch_size}")
return np.array(all_embeddings)
# ==========================================================
# 5️⃣ Embedding Cache Manager (Chunk-Aware + Auto-Cleanup)
# ==========================================================
CACHE_EMB_DIR = "/tmp/embed_cache"
os.makedirs(CACHE_EMB_DIR, exist_ok=True)
def _hash_name(file_name: str, chunk_size: int, overlap: int, num_chunks: int):
"""Generate unique short hash for a file + chunking configuration."""
combo = f"{file_name}_{chunk_size}_{overlap}_{num_chunks}"
return hashlib.md5(combo.encode()).hexdigest()[:8]
def _clean_old_caches(base_name: str, keep_latest: int = 5):
"""Keep only latest few embedding caches for each document."""
files = [
(os.path.getmtime(os.path.join(CACHE_EMB_DIR, f)), f)
for f in os.listdir(CACHE_EMB_DIR)
if f.startswith(base_name)
]
if len(files) > keep_latest:
files.sort(reverse=True)
for _, old_file in files[keep_latest:]:
try:
os.remove(os.path.join(CACHE_EMB_DIR, old_file))
print(f"🧹 Removed old cache: {old_file}")
except Exception:
pass
def cache_embeddings(file_name: str, chunks, embed_func, chunk_size: int = None, overlap: int = None):
    """Load or create an embeddings cache (chunk-size + overlap aware)."""
    # Resolve defaults once so the hash and the cached file name stay consistent.
    chunk_size = chunk_size or 1000
    overlap = overlap or 100
    cache_key = _hash_name(file_name, chunk_size, overlap, len(chunks))
    cache_file = f"{os.path.basename(file_name)}_cs{chunk_size}_ov{overlap}_{cache_key}.pkl"
    cache_path = os.path.join(CACHE_EMB_DIR, cache_file)
    base_name = os.path.basename(file_name)
if os.path.exists(cache_path):
print(f"🧠 Loaded cached embeddings for {base_name} ({chunk_size}/{overlap})")
with open(cache_path, "rb") as f:
return pickle.load(f)
print(f"💡 No cache found for {base_name} ({chunk_size}/{overlap}). Generating new embeddings...")
embeddings = embed_func(chunks)
with open(cache_path, "wb") as f:
pickle.dump(embeddings, f)
print(f"💾 Cached embeddings saved as {cache_file}")
_clean_old_caches(base_name, keep_latest=5)
return embeddings
# ==========================================================
# 6️⃣ Prompt Templates
# ==========================================================
STRICT_PROMPT = (
"You are an enterprise documentation assistant.\n"
"Use all relevant information from the CONTEXT below.\n"
"If multiple related points appear across chunks, combine them logically into one clear answer.\n"
"Keep the answer concise but complete. Do not invent facts outside the provided content.\n"
"If the answer cannot be found even after considering all chunks, say exactly:\n"
"'I don't know based on the provided document.'\n\n"
"Context:\n{context}\n\nQuestion: {query}\nAnswer:"
)
REASONING_PROMPT = (
"You are an expert enterprise assistant capable of reasoning.\n"
"Think step by step and synthesize information even if scattered across chunks.\n"
"Base your answer primarily on the CONTEXT, but if multiple partial clues exist, combine them logically.\n"
"You may fill reasonable gaps with general knowledge to form a complete answer.\n"
"If absolutely nothing in the document relates, say exactly:\n"
"'I don't know based on the provided document.'\n\n"
"Context:\n{context}\n\nQuestion: {query}\nLet's reason step-by-step:\nAnswer:"
)
# ==========================================================
# 7️⃣ Retrieval — FAISS + Bullet-Aware Re-rank + Neighbor Fill
# ==========================================================
from vectorstore import build_faiss_index
def retrieve_chunks(query: str, index, chunks: list, top_k: int = 5,
min_similarity: float = 0.6, candidate_multiplier: int = 3,
embeddings: list = None):
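    """
    Retrieve the most relevant chunks for a query:
    FAISS ANN search -> exact cosine re-rank (with a small boost for
    bullet/numbered chunks) -> neighbor fill for continuity.
    """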
if not index or not chunks:
print("⚠️ No FAISS index or chunks provided — returning empty result.")
return []
try:
q_emb = _query_model.encode(
[f"query: {query.strip()}"],
convert_to_numpy=True,
normalize_embeddings=True
)[0]
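        # Guard against a stale index built by a different embedding model
        # (dimension mismatch between the index and the query vector).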
if hasattr(index, "d") and q_emb.shape[0] != index.d:
print(f"⚠️ FAISS dimension mismatch: index={index.d}, query={q_emb.shape[0]}")
            if embeddings is not None and len(embeddings) > 0:
print("🔄 Rebuilding FAISS index...")
index = build_faiss_index(embeddings)
else:
return []
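        # Over-fetch candidates so the exact cosine re-rank below has room to reorder.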
num_candidates = max(top_k * candidate_multiplier, top_k + 2)
distances, indices = index.search(np.array([q_emb]).astype("float32"), num_candidates)
candidate_indices = [int(i) for i in indices[0] if i >= 0]
candidate_indices = list(dict.fromkeys(candidate_indices))
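        # Re-embed the candidates as passages and score them with exact cosine similarity.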
doc_embs = _query_model.encode(
[f"passage: {chunks[i]}" for i in candidate_indices],
convert_to_numpy=True,
normalize_embeddings=True,
)
sims = cosine_similarity([q_emb], doc_embs)[0]
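        # Procedural content often lives in bullet/numbered chunks; give those a small additive boost.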
boosted_sims = []
for idx, sim in zip(candidate_indices, sims):
text = chunks[idx].strip()
if re.match(r"^[-•\d]+[\.\s]", text):
sim += 0.05
boosted_sims.append((idx, sim))
ranked = sorted(boosted_sims, key=lambda x: x[1], reverse=True)
filtered = [idx for idx, sim in ranked if sim >= min_similarity][:top_k]
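        # Neighbor fill: include adjacent chunks so steps that span a chunk boundary stay together.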
neighbors = set()
for idx in filtered:
for n in [idx - 1, idx + 1]:
if 0 <= n < len(chunks):
neighbors.add(n)
filtered = sorted(set(filtered) | neighbors)
final_chunks = [chunks[i] for i in filtered]
print(f"✅ Retrieved {len(final_chunks)} chunks (bullet-aware + continuity).")
return final_chunks
except Exception as e:
print(f"⚠️ Retrieval error: {repr(e)}")
return []
# ==========================================================
# 8️⃣ Answer Generation
# ==========================================================
def generate_answer(query: str, retrieved_chunks: list, reasoning_mode: bool = False):
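    """Answer the query from the retrieved chunks via GPT-4o; reasoning_mode switches between the strict and step-by-step prompts."""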
if not retrieved_chunks:
return "Sorry, I couldn’t find relevant information in the document."
if chat_llm is None:
return "⚠️ GPT-4o not initialized. Check credentials or rebuild the Space."
context = "\n".join(f"[Chunk {i+1}] {chunk.strip()}" for i, chunk in enumerate(retrieved_chunks))
prompt = (REASONING_PROMPT if reasoning_mode else STRICT_PROMPT).format(context=context, query=query)
messages = [
{"role": "system", "content":
"You are an expert enterprise documentation assistant. "
"When reasoning_mode is off, stay strictly factual and concise. "
"When reasoning_mode is on, combine insights across chunks logically "
"and explain briefly. "
"If the answer is not in the document, reply exactly: "
"'I don't know based on the provided document.'"},
{"role": "user", "content": prompt},
]
try:
response = chat_llm.invoke(messages)
return response.content.strip()
except Exception as e:
print(f"⚠️ GPT-4o generation failed: {e}")
return "⚠️ Error: Could not generate an answer."
# ==========================================================
# 9️⃣ Generic Text Generation Helper (for AI suggestions)
# ==========================================================
def genai_generate(prompt: str) -> str:
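    """One-shot GPT-4o text generation for lightweight AI suggestions (no retrieval context)."""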
if chat_llm is None:
raise RuntimeError("⚠️ GPT-4o not initialized. Check credentials or rebuild the Space.")
messages = [
{"role": "system", "content": "You are a concise, intelligent text generator."},
{"role": "user", "content": prompt.strip()},
]
try:
response = chat_llm.invoke(messages)
return response.content.strip()
except Exception as e:
print(f"⚠️ genai_generate() failed: {e}")
return "⚠️ Unable to generate response."
# ==========================================================
# 🔟 Local Test
# ==========================================================
if __name__ == "__main__":
from vectorstore import build_faiss_index
dummy_chunks = [
"- Step 1: Enable order confirmation capability.",
"- Step 2: Configure supplier email.",
"Setup instructions and configuration details.",
"Prerequisites for automation are described here."
]
embeddings = embed_chunks(dummy_chunks)
index = build_faiss_index(embeddings)
query = "What are the prerequisites for commerce automation?"
retrieved = retrieve_chunks(query, index, dummy_chunks)
print("🔍 Retrieved:", retrieved)
print("💬 Answer:", generate_answer(query, retrieved, reasoning_mode=False))