"""
qa.py — GPT-4o (SAP Gen AI Hub) + ReRank Retrieval
--------------------------------------------------
✅ Semantic retrieval (FAISS + cosine re-rank + neighbor fill)
✅ Bullet-aware similarity boost for procedural chunks
✅ Embedding caching (per PDF)
✅ Smart factual mode (fast)
✅ Deep reasoning mode (ChatGPT-like)
"""

import os
import re
import json
import pickle
import hashlib
import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
from gen_ai_hub.proxy.core.proxy_clients import get_proxy_client
from gen_ai_hub.proxy.langchain.openai import ChatOpenAI

print("✅ qa.py (GPT-4o via Gen AI Hub + Bullet-Aware Retrieval + Cache) loaded from:", __file__)

# ==========================================================
# 1️⃣ Hugging Face Cache Setup
# ==========================================================
CACHE_DIR = "/tmp/hf_cache"
os.makedirs(CACHE_DIR, exist_ok=True)
os.environ.update({
    "HF_HOME": CACHE_DIR,
    "TRANSFORMERS_CACHE": CACHE_DIR,
    "HF_DATASETS_CACHE": CACHE_DIR,
    "HF_MODULES_CACHE": CACHE_DIR
})

# ==========================================================
# 2️⃣ Embedding Model (E5-small-v2)
# ==========================================================
try:
    _query_model = SentenceTransformer(
        "intfloat/e5-small-v2",  # ⚡ Faster, 384-dim embeddings
        cache_folder=CACHE_DIR
    )
    print("✅ Loaded embedding model: intfloat/e5-small-v2 (fast mode)")
except Exception as e:
    print(f"⚠️ Embedding load failed ({e}), using MiniLM fallback")
    _query_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2", cache_folder=CACHE_DIR)

# ==========================================================
# 3️⃣ GPT-4o via SAP Gen AI Hub
# ==========================================================
print("✅ Loading GPT-4o via SAP Gen AI Hub...")
CRED_PATH = os.path.join(os.path.dirname(__file__), "GEN AI HUB PROXY.json")
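# Expected service-key fields (standard SAP AI Core service key, as read below):
#   url, clientid, clientsecret, serviceurls["AI_API_URL"]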

try:
    with open(CRED_PATH, "r") as key_file:
        svcKey = json.load(key_file)

    os.environ.update({
        "AICORE_AUTH_URL": svcKey["url"],
        "AICORE_CLIENT_ID": svcKey["clientid"],
        "AICORE_CLIENT_SECRET": svcKey["clientsecret"],
        "AICORE_RESOURCE_GROUP": "default",
        "AICORE_BASE_URL": svcKey["serviceurls"]["AI_API_URL"]
    })

    proxy_client = get_proxy_client("gen-ai-hub")
    chat_llm = ChatOpenAI(
        proxy_model_name="gpt-4o",
        proxy_client=proxy_client,
        temperature=0.3,
        max_tokens=1500
    )
    print("✅ GPT-4o (via Gen AI Hub) ready for generation.")
except Exception as e:
    print(f"⚠️ Gen AI Hub setup failed: {e}")
    chat_llm = None

# ==========================================================
# 4️⃣ Embedding Cache Manager
# ==========================================================
CACHE_EMB_DIR = "/tmp/embed_cache"
os.makedirs(CACHE_EMB_DIR, exist_ok=True)

def _hash_name(file_name: str):
    """Generate unique hash for PDF file name."""
    return hashlib.md5(file_name.encode()).hexdigest()

def cache_embeddings(file_name: str, chunks, embed_func):
    """
    Check whether cached embeddings exist for a PDF; if not, compute and save them.
    """
    cache_path = os.path.join(CACHE_EMB_DIR, f"{_hash_name(file_name)}.pkl")

    if os.path.exists(cache_path):
        print(f"🧠 Loaded cached embeddings for {file_name}")
        with open(cache_path, "rb") as f:
            return pickle.load(f)

    print(f"💡 No cache found for {file_name}. Generating embeddings...")
    embeddings = embed_func(chunks)
    with open(cache_path, "wb") as f:
        pickle.dump(embeddings, f)
    print(f"💾 Cached embeddings saved for {file_name}")
    return embeddings
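
# Usage sketch (the file name is illustrative): the first call computes and
# pickles the embeddings; later calls with the same name load them from
# /tmp/embed_cache instead.
#   embeddings = cache_embeddings("order_guide.pdf", chunks, embed_chunks)
# Note the cache is keyed by file name only, so a changed PDF re-uploaded
# under the same name will hit the stale cache.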

def embed_chunks(chunks, batch_size=32):
    """
    Batch-encode text chunks for speed.
    """
    all_embeddings = []
    for i in range(0, len(chunks), batch_size):
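        # E5-family models expect a "passage: " prefix on documents
        # (and "query: " on queries, as used in retrieve_chunks below).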
        batch = [f"passage: {c}" for c in chunks[i:i+batch_size]]
        batch_embs = _query_model.encode(
            batch,
            convert_to_numpy=True,
            normalize_embeddings=True,
            show_progress_bar=False
        )
        all_embeddings.extend(batch_embs)
    print(f"⚡ Embedded {len(all_embeddings)} chunks in batches of {batch_size}")
    return np.array(all_embeddings)

# ==========================================================
# 5️⃣ Prompt Templates
# ==========================================================
STRICT_PROMPT = (
    "You are an enterprise documentation assistant.\n"
    "Use all relevant information from the CONTEXT below.\n"
    "If multiple related points appear across chunks, combine them logically into one clear answer.\n"
    "Keep the answer concise but complete. Do not invent facts outside the provided content.\n"
    "If the answer cannot be found even after considering all chunks, say exactly:\n"
    "'I don't know based on the provided document.'\n\n"
    "Context:\n{context}\n\nQuestion: {query}\nAnswer:"
)

REASONING_PROMPT = (
    "You are an expert enterprise assistant capable of reasoning.\n"
    "Think step by step and synthesize information even if scattered across chunks.\n"
    "Base your answer primarily on the CONTEXT, but if multiple partial clues exist, combine them logically.\n"
    "You may fill reasonable gaps with general knowledge to form a complete answer.\n"
    "If absolutely nothing in the document relates, say exactly:\n"
    "'I don't know based on the provided document.'\n\n"
    "Context:\n{context}\n\nQuestion: {query}\nLet's reason step-by-step:\nAnswer:"
)
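
# Both templates are plain str.format targets, filled in generate_answer, e.g.
#   STRICT_PROMPT.format(context="[Chunk 1] ...", query="...")
# STRICT_PROMPT backs the fast factual mode; REASONING_PROMPT is selected
# when reasoning_mode=True.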

# ==========================================================
# 6️⃣ Retrieval — FAISS + Bullet-Aware Re-rank + Neighbor Fill
# ==========================================================
from vectorstore import build_faiss_index

def retrieve_chunks(query: str, index, chunks: list, top_k: int = 5,
                    min_similarity: float = 0.6, candidate_multiplier: int = 3,
                    embeddings: np.ndarray = None):
    """
    Retrieve the top relevant chunks while preserving context continuity.
    Adds a small similarity boost for procedural (bullet or numbered) chunks.
    Pass `embeddings` to allow rebuilding the FAISS index on a dimension mismatch.
    """

    if not index or not chunks:
        print("⚠️ No FAISS index or chunks provided — returning empty result.")
        return []

    try:
        q_emb = _query_model.encode(
            [f"query: {query.strip()}"],
            convert_to_numpy=True,
            normalize_embeddings=True
        )[0]

        # ✅ Dimension sanity check (the rebuilt index is local to this call)
        if hasattr(index, "d") and q_emb.shape[0] != index.d:
            print(f"⚠️ FAISS dimension mismatch: index={index.d}, query={q_emb.shape[0]}")
            # `if embeddings:` would raise ValueError on a NumPy array, so
            # test for None/emptiness explicitly.
            if embeddings is not None and len(embeddings) > 0:
                print("🔄 Rebuilding FAISS index...")
                index = build_faiss_index(embeddings)
            else:
                return []

        # Step 1️⃣ — Initial FAISS retrieval
        num_candidates = max(top_k * candidate_multiplier, top_k + 2)
        distances, indices = index.search(np.array([q_emb]).astype("float32"), num_candidates)
        candidate_indices = [int(i) for i in indices[0] if i >= 0]
        candidate_indices = list(dict.fromkeys(candidate_indices))

        # Step 2️⃣ — Re-rank with bullet-aware boost
        doc_embs = _query_model.encode(
            [f"passage: {chunks[i]}" for i in candidate_indices],
            convert_to_numpy=True,
            normalize_embeddings=True,
        )
        sims = cosine_similarity([q_emb], doc_embs)[0]
        boosted_sims = []
        for idx, sim in zip(candidate_indices, sims):
            text = chunks[idx].strip()
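            # Small fixed boost for procedural text: lines opening with a
            # bullet ("-", "•") or a digit followed by "." or whitespace,
            # e.g. "- Step 1:" or "1. Enable ..."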
            if re.match(r"^[-•\d]+[\.\s]", text):  # bullet or step pattern
                sim += 0.05
            boosted_sims.append((idx, sim))

        ranked = sorted(boosted_sims, key=lambda x: x[1], reverse=True)
        filtered = [idx for idx, sim in ranked if sim >= min_similarity][:top_k]

        # Step 3️⃣ — Add neighboring chunks for continuity
        neighbors = set()
        for idx in filtered:
            for n in [idx - 1, idx + 1]:
                if 0 <= n < len(chunks):
                    neighbors.add(n)
        filtered = sorted(set(filtered) | neighbors)

        final_chunks = [chunks[i] for i in filtered]
        print(f"✅ Retrieved {len(final_chunks)} chunks (bullet-aware + continuity).")
        return final_chunks

    except Exception as e:
        print(f"⚠️ Retrieval error: {repr(e)}")
        return []

# ==========================================================
# 7️⃣ Answer Generation
# ==========================================================
def generate_answer(query: str, retrieved_chunks: list, reasoning_mode: bool = False):
    if not retrieved_chunks:
        return "Sorry, I couldn’t find relevant information in the document."
    if chat_llm is None:
        return "⚠️ GPT-4o not initialized. Check credentials or rebuild the Space."

    context = "\n".join(f"[Chunk {i+1}] {chunk.strip()}" for i, chunk in enumerate(retrieved_chunks))
    prompt = (REASONING_PROMPT if reasoning_mode else STRICT_PROMPT).format(context=context, query=query)

    messages = [
        {"role": "system", "content":
            "You are an expert enterprise documentation assistant. "
            "When reasoning_mode is off, stay strictly factual and concise. "
            "When reasoning_mode is on, combine insights across chunks logically "
            "and explain briefly. "
            "If the answer is not in the document, reply exactly: "
            "'I don't know based on the provided document.'"},
        {"role": "user", "content": prompt},
    ]

    try:
        response = chat_llm.invoke(messages)
        return response.content.strip()
    except Exception as e:
        print(f"⚠️ GPT-4o generation failed: {e}")
        return "⚠️ Error: Could not generate an answer."

# ==========================================================
# 8️⃣ Local Test
# ==========================================================
if __name__ == "__main__":
    # build_faiss_index is already imported at module level above.

    dummy_chunks = [
        "- Step 1: Enable order confirmation capability.",
        "- Step 2: Configure supplier email.",
        "Setup instructions and configuration details.",
        "Prerequisites for automation are described here."
    ]

    embeddings = embed_chunks(dummy_chunks)
    index = build_faiss_index(embeddings)

    query = "What are the prerequisites for commerce automation?"
    retrieved = retrieve_chunks(query, index, dummy_chunks)
    print("🔍 Retrieved:", retrieved)
    print("💬 Answer:", generate_answer(query, retrieved, reasoning_mode=False))