""" qa.py — GPT-4o (SAP Gen AI Hub) + ReRank Retrieval -------------------------------------------------- ✅ Semantic retrieval (FAISS + cosine re-rank + neighbor fill) ✅ Bullet-aware similarity boost for procedural chunks ✅ Embedding caching (per PDF + chunk config aware) ✅ Smart factual mode (fast) ✅ Deep reasoning mode (ChatGPT-like) ✅ genai_generate() helper for suggestions """ import os import re import json import pickle import hashlib import numpy as np from sentence_transformers import SentenceTransformer from sklearn.metrics.pairwise import cosine_similarity from gen_ai_hub.proxy.core.proxy_clients import get_proxy_client from gen_ai_hub.proxy.langchain.openai import ChatOpenAI print("✅ qa.py (GPT-4o via Gen AI Hub + Bullet-Aware Retrieval + Cache) loaded from:", __file__) # ========================================================== # 🧱 Permanent Embeddings Cache Directory # ========================================================== CACHE_EMB_DIR = os.path.join(os.path.dirname(__file__), "embed_cache") os.makedirs(CACHE_EMB_DIR, exist_ok=True) # Verify write permission try: test_file = os.path.join(CACHE_EMB_DIR, "test_write.tmp") with open(test_file, "w") as f: f.write("ok") os.remove(test_file) print(f"✅ Cache directory ready and writable: {CACHE_EMB_DIR}") except Exception as e: print(f"⚠️ Cache directory not writable ({CACHE_EMB_DIR}): {e}") CACHE_EMB_DIR = "/tmp/embed_cache" os.makedirs(CACHE_EMB_DIR, exist_ok=True) print(f"🔄 Fallback to temporary cache: {CACHE_EMB_DIR}") # ========================================================== # 1️⃣ Hugging Face Cache Setup # ========================================================== CACHE_DIR = "/tmp/hf_cache" os.makedirs(CACHE_DIR, exist_ok=True) os.environ.update({ "HF_HOME": CACHE_DIR, "TRANSFORMERS_CACHE": CACHE_DIR, "HF_DATASETS_CACHE": CACHE_DIR, "HF_MODULES_CACHE": CACHE_DIR }) # ========================================================== # 2️⃣ Embedding Model (E5-small-v2) # ========================================================== try: _query_model = SentenceTransformer( "intfloat/e5-small-v2", # ⚡ Faster, 384-dim embeddings cache_folder=CACHE_DIR ) print("✅ Loaded embedding model: intfloat/e5-small-v2 (fast mode)") except Exception as e: print(f"⚠️ Embedding load failed ({e}), using MiniLM fallback") _query_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2", cache_folder=CACHE_DIR) # ========================================================== # 3️⃣ GPT-4o via SAP Gen AI Hub — Lazy / On-demand initialization # ========================================================== CRED_PATH = os.path.join(os.path.dirname(__file__), "GEN AI HUB PROXY.json") _chat_llm = None # cached instance def get_chat_llm(model_name: str = "gpt-4o", temperature: float = 0.3, max_tokens: int = 1500): """ Lazily initializes ChatOpenAI via Gen AI Hub proxy. Only runs when first needed; cached afterward. """ global _chat_llm if _chat_llm is not None: return _chat_llm try: # Optional: set environment variables from service key if present if os.path.exists(CRED_PATH): with open(CRED_PATH, "r") as key_file: svcKey = json.load(key_file) os.environ.update({ "AICORE_AUTH_URL": svcKey.get("url", ""), "AICORE_CLIENT_ID": svcKey.get("clientid", ""), "AICORE_CLIENT_SECRET": svcKey.get("clientsecret", ""), "AICORE_BASE_URL": svcKey.get("serviceurls", {}).get("AI_API_URL", ""), }) proxy_client = get_proxy_client("gen-ai-hub") _chat_llm = ChatOpenAI( proxy_model_name=model_name, proxy_client=proxy_client, temperature=temperature, max_tokens=max_tokens, ) print(f"✅ GPT-4o (via Gen AI Hub) initialized lazily for model: {model_name}") return _chat_llm except Exception as e: print(f"⚠️ Gen AI Hub lazy init failed: {e}") _chat_llm = None raise # ========================================================== # 4️⃣ Embedding Generator (batch-optimized) # ========================================================== def embed_chunks(chunks, batch_size: int = 32): """ Batch-encode text chunks using the global embedding model. Normalized 384-dim embeddings for FAISS retrieval. """ if not chunks: return np.array([]) all_embeddings = [] for i in range(0, len(chunks), batch_size): batch = [f"passage: {c}" for c in chunks[i:i + batch_size]] batch_embs = _query_model.encode( batch, convert_to_numpy=True, normalize_embeddings=True, show_progress_bar=False ) all_embeddings.extend(batch_embs) print(f"⚡ Embedded {len(all_embeddings)} chunks in batches of {batch_size}") return np.array(all_embeddings) # ========================================================== # 5️⃣ Embedding Cache Manager (Chunk-Aware + Auto-Cleanup) # ========================================================== CACHE_EMB_DIR = "/tmp/embed_cache" os.makedirs(CACHE_EMB_DIR, exist_ok=True) def _hash_name(file_name: str, chunk_size: int, overlap: int, num_chunks: int): """Generate unique short hash for a file + chunking configuration.""" combo = f"{file_name}_{chunk_size}_{overlap}_{num_chunks}" return hashlib.md5(combo.encode()).hexdigest()[:8] def _clean_old_caches(base_name: str, keep_latest: int = 5): """Keep only latest few embedding caches for each document.""" files = [ (os.path.getmtime(os.path.join(CACHE_EMB_DIR, f)), f) for f in os.listdir(CACHE_EMB_DIR) if f.startswith(base_name) ] if len(files) > keep_latest: files.sort(reverse=True) for _, old_file in files[keep_latest:]: try: os.remove(os.path.join(CACHE_EMB_DIR, old_file)) print(f"🧹 Removed old cache: {old_file}") except Exception: pass def cache_embeddings(file_name: str, chunks, embed_func, chunk_size: int = None, overlap: int = None): """Load or create embeddings cache (chunk size + overlap aware).""" cache_key = _hash_name(file_name, chunk_size or 1000, overlap or 100, len(chunks)) cache_file = f"{os.path.basename(file_name)}_cs{chunk_size}_ov{overlap}_{cache_key}.pkl" cache_path = os.path.join(CACHE_EMB_DIR, cache_file) base_name = os.path.basename(file_name) if os.path.exists(cache_path): print(f"🧠 Loaded cached embeddings for {base_name} ({chunk_size}/{overlap})") with open(cache_path, "rb") as f: return pickle.load(f) print(f"💡 No cache found for {base_name} ({chunk_size}/{overlap}). Generating new embeddings...") embeddings = embed_func(chunks) with open(cache_path, "wb") as f: pickle.dump(embeddings, f) print(f"💾 Cached embeddings saved as {cache_file}") _clean_old_caches(base_name, keep_latest=5) return embeddings # ========================================================== # 6️⃣ Prompt Templates (Enhanced for Structured Formatting + Clean Output) # ========================================================== STRICT_PROMPT = ( "You are an enterprise documentation assistant.\n" "Use all relevant information from the CONTEXT below.\n" "When multiple causes, steps, or key points are discussed, present them as short, well-structured bullet points.\n" "When the answer focuses on a single concept, definition, or explanation, write it as a clear and compact paragraph.\n" "Keep the tone professional and concise. Do not invent facts outside the provided content.\n" "Do not mention or refer to internal elements such as 'chunks', 'chunk numbers', 'passages', or 'sections of the document'.\n" "If the answer cannot be found directly but there are partial clues, summarize those clues briefly starting with 'Based on the available information,'.\n" "If nothing at all in the CONTEXT relates to the question, reply exactly:\n" "'I don't know based on the provided document.'\n\n" "Context:\n{context}\n\nQuestion: {query}\nAnswer:" ) REASONING_PROMPT = ( "You are an expert enterprise assistant capable of reasoning.\n" "Think step by step and synthesize information even if scattered across chunks.\n" "Base your answer primarily on the CONTEXT, but if multiple partial clues exist, combine them logically.\n" "You may fill reasonable gaps with general knowledge to form a complete answer.\n" "Do not mention or refer to internal elements such as 'chunks', 'chunk numbers', 'passages', or 'sections of the document'.\n" "If absolutely nothing in the document relates, say exactly:\n" "'I don't know based on the provided document.'\n\n" "Context:\n{context}\n\nQuestion: {query}\nLet's reason step-by-step:\nAnswer:" ) # ========================================================== # 7️⃣ Retrieval — FAISS + Bullet-Aware Re-rank + Neighbor Fill # ========================================================== from vectorstore import build_faiss_index def retrieve_chunks(query: str, index, chunks: list, top_k: int = 7, min_similarity: float = 0.4, candidate_multiplier: int = 3, embeddings: list = None): """ Retrieves the most relevant chunks using FAISS similarity + reranking. Includes bullet-aware similarity boost and a fallback mechanism if similarity threshold isn't met — ensuring predictable, complete retrieval. """ if not index or not chunks: print("⚠️ No FAISS index or chunks provided — returning empty result.") return [] try: # --- Encode query q_emb = _query_model.encode( [f"query: {query.strip()}"], convert_to_numpy=True, normalize_embeddings=True )[0] # --- Rebuild index if mismatch occurs if hasattr(index, "d") and q_emb.shape[0] != index.d: print(f"⚠️ FAISS dimension mismatch: index={index.d}, query={q_emb.shape[0]}") if embeddings: print("🔄 Rebuilding FAISS index...") index = build_faiss_index(embeddings) else: return [] # --- Retrieve top candidate chunks num_candidates = max(top_k * candidate_multiplier, top_k + 2) distances, indices = index.search(np.array([q_emb]).astype("float32"), num_candidates) candidate_indices = [int(i) for i in indices[0] if i >= 0] candidate_indices = list(dict.fromkeys(candidate_indices)) # remove duplicates # --- Re-rank using cosine similarity doc_embs = _query_model.encode( [f"passage: {chunks[i]}" for i in candidate_indices], convert_to_numpy=True, normalize_embeddings=True, ) sims = cosine_similarity([q_emb], doc_embs)[0] boosted_sims = [] for idx, sim in zip(candidate_indices, sims): text = chunks[idx].strip() if re.match(r"^[-•\d]+[\.\s]", text): sim += 0.05 # slight boost for procedural bullets boosted_sims.append((idx, sim)) ranked = sorted(boosted_sims, key=lambda x: x[1], reverse=True) # --- Filter based on similarity threshold filtered = [idx for idx, sim in ranked if sim >= min_similarity][:top_k] # --- Fallback: if no matches above threshold, pick top_k anyway if not filtered: print(f"⚠️ No chunks ≥ {min_similarity:.2f} — using top {top_k} ranked chunks instead.") filtered = [idx for idx, sim in ranked[:top_k]] # --- Neighbor continuity: include nearby chunks neighbors = set() for idx in filtered: for n in [idx - 1, idx + 1]: if 0 <= n < len(chunks): neighbors.add(n) filtered = sorted(set(filtered) | neighbors) # --- Return final chunk set final_chunks = [chunks[i] for i in filtered] avg_sim = np.mean([s for _, s in ranked[:top_k]]) print(f"✅ Retrieved {len(final_chunks)} chunks | avg_sim={avg_sim:.3f} | threshold={min_similarity:.2f}") return final_chunks except Exception as e: print(f"⚠️ Retrieval error: {repr(e)}") return [] # ========================================================== # 8️⃣ Answer Generation (Lazy GPT-4o Initialization) # ========================================================== def generate_answer(query: str, retrieved_chunks: list, reasoning_mode: bool = False): if not retrieved_chunks: return "Sorry, I couldn’t find relevant information in the document." # Try lazy initialization try: chat_llm_local = get_chat_llm() except Exception: return "⚠️ GPT-4o not initialized. Check credentials or rebuild the Space." # Build context and prompt context = "\n".join(f"[Chunk {i+1}] {chunk.strip()}" for i, chunk in enumerate(retrieved_chunks)) prompt = (REASONING_PROMPT if reasoning_mode else STRICT_PROMPT).format(context=context, query=query) messages = [ {"role": "system", "content": "You are an expert enterprise documentation assistant. " "When reasoning_mode is off, stay strictly factual and concise. " "When reasoning_mode is on, combine insights across chunks logically " "and explain briefly. " "If the answer is not in the document, reply exactly: " "'I don't know based on the provided document.'"}, {"role": "user", "content": prompt}, ] # Invoke GPT-4o try: response = chat_llm_local.invoke(messages) return response.content.strip() except Exception as e: print(f"⚠️ GPT-4o generation failed: {e}") return "⚠️ Error: Could not generate an answer." # ========================================================== # 9️⃣ Generic Text Generation Helper (for AI suggestions) # ========================================================== def genai_generate(prompt: str) -> str: # Try lazy initialization try: chat_llm_local = get_chat_llm() except Exception: raise RuntimeError("⚠️ GPT-4o not initialized. Check credentials or rebuild the Space.") messages = [ {"role": "system", "content": "You are a concise, intelligent text generator."}, {"role": "user", "content": prompt.strip()}, ] try: response = chat_llm_local.invoke(messages) return response.content.strip() except Exception as e: print(f"⚠️ genai_generate() failed: {e}") return "⚠️ Unable to generate response." # ========================================================== # 🔟 Local Test # ========================================================== if __name__ == "__main__": from vectorstore import build_faiss_index dummy_chunks = [ "- Step 1: Enable order confirmation capability.", "- Step 2: Configure supplier email.", "Setup instructions and configuration details.", "Prerequisites for automation are described here." ] embeddings = embed_chunks(dummy_chunks) index = build_faiss_index(embeddings) query = "What are the prerequisites for commerce automation?" retrieved = retrieve_chunks(query, index, dummy_chunks) print("🔍 Retrieved:", retrieved) print("💬 Answer:", generate_answer(query, retrieved, reasoning_mode=False))