# ==========================================================
# streamlit_app.py — Stable Layout (English Only) + Session Fix
# ==========================================================
import hashlib
import os
import re
import streamlit as st
import torch
from document_registry import DocumentRegistry


# ==========================================================
# ✅ PAGE CONFIGS
# ==========================================================
st.set_page_config(page_title="Enterprise Knowledge Assistant", layout="wide")
print("CUDA available:", torch.cuda.is_available())

# ==========================================================
# ⚙️ CACHE SETUP
# ==========================================================
CACHE_DIR = "/tmp/hf_cache"
os.makedirs(CACHE_DIR, exist_ok=True)
os.environ.update({
    "HF_HOME": CACHE_DIR,
    "TRANSFORMERS_CACHE": CACHE_DIR,
    "HF_DATASETS_CACHE": CACHE_DIR,
    "HF_MODULES_CACHE": CACHE_DIR,
})
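# These variables are read by the Hugging Face libraries at import time, so this
# block must run before the app modules below are imported (they presumably pull
# in transformers / sentence-transformers via ingestion, vectorstore, and qa).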

# ==========================================================
# 📦 IMPORTS
# ==========================================================
from ingestion import extract_text_from_pdf, chunk_text
from vectorstore import build_faiss_index
from qa import retrieve_chunks, generate_answer, cache_embeddings, embed_chunks, genai_generate

# ==========================================================
# 🧠 SMART SUGGESTION GENERATOR (English Only)
# ==========================================================
def generate_dynamic_suggestions_from_toc(toc, chunks, doc_name="Document"):
    """Generates 5–7 short, natural English questions based on TOC and document text."""
    if not toc or not chunks:
        return ["How do I start using this guide?", "What does this document cover?"]

    titles = []
    for sec, raw_title in toc:
        # Strip a leading numeric section label (e.g. "1.2.3 ") without eating the
        # first word of unnumbered titles, then drop dotted leaders / page numbers.
        title = re.sub(r"^\s*\d[\d.\-]*\s*", "", raw_title)
        title = re.sub(r"\.{2,}\s*\d+$", "", title).strip()
        if 4 < len(title) < 120:
            titles.append(title)

    context_sample = " ".join(chunks[:3])[:4000]
    prompt = f"""
You are a content assistant. Based on the Table of Contents and the sample document text below,
generate 5–7 short, natural user-facing questions.
Each question should be under 18 words, end with a question mark, and sound human.
Document: "{doc_name}"

TABLE OF CONTENTS:
{chr(10).join(['- ' + t for t in titles[:8]])}

SAMPLE TEXT:
{context_sample}

Output: Write each question on a new line. Do not invent facts — base questions only on the document.
"""

    try:
        ai_response = genai_generate(prompt)
        lines = [ln.strip() for ln in ai_response.splitlines() if ln.strip()]
        questions = []
        for ln in lines:
            q = re.sub(r"^[\-\u2022\*\d\.\)\s]+", "", ln).strip()
            if not q.endswith("?") and len(q.split()) < 18 and re.match(r"(?i)^(what|how|why|where|who|when|which|can|does|is|are)\b", q):
                q += "?"
            if 8 <= len(q) <= 140:
                questions.append(q)
        # Deduplicate case-insensitively while preserving order.
        final, seen = [], set()
        for q in questions:
            if q.lower() not in seen:
                seen.add(q.lower())
                final.append(q)
        if not final:
            final = [f"What should I know about {t.rstrip('.')}?" for t in titles[:7]]
        return final[:7]
    except Exception:
        return ["How do I start using this guide?", "What does this document cover?"]


# ==========================================================
# 🎨 STYLING β€” REVERT TO ORIGINAL
# ==========================================================
st.markdown("""
<style>
div.block-container {padding-top: 1.2rem; max-width: 1080px;}
h1, h2, h3 {color: #f3f4f6; font-weight: 600;}
.suggest-chip {
    background: #0f1724;
    border: 1px solid #374151;
    border-radius: 14px;
    color: #e6eef8;
    padding: 8px 12px;
    cursor: pointer;
    font-size: 13px;
    margin: 6px 6px 6px 0;
    display: inline-block;
    transition: background 0.2s, transform 0.1s;
}
.suggest-chip:hover {background: #1e3a8a; transform: translateY(-2px);}
.answer-box {
    background: linear-gradient(180deg,#0b1220,#071027);
    border-left: 4px solid #3b82f6;
    border-radius: 8px;
    padding: 16px 18px;
    color: #e6eef8;
    margin-top: 12px;
    box-shadow: 0 4px 14px rgba(0,0,0,0.35);
}
.stTextInput > div > div > input {
    background-color: #0f172a !important;
    color: #f1f5f9 !important;
    border-radius: 6px !important;
    border: 1px solid #334155 !important;
    padding: 8px 10px !important;
    font-size: 15px !important;
}
.stTextInput > label {font-weight: 500;}
.small-link {
    font-size: 13px;
    color: #60a5fa;
    cursor: pointer;
}
</style>
""", unsafe_allow_html=True)

# ==========================================================
# 🧭 SIDEBAR β€” Clean, User-Focused
# ==========================================================
with st.sidebar:
    # --- Response Style ---
    st.markdown("### 🧭 Response Style")
    mode = st.radio(
        "",
        ("Strict (Document-only)", "Extended (Document + General)"),
        index=0,
        help="Strict = answers only from the uploaded document. Extended = may include related general info.",
    )

    st.markdown("---")

    # --- (Registry hidden: backend only) ---

    # --- Developer Options ---
    show_dev = st.checkbox("Show advanced settings (for developers)", value=False)
    if show_dev:
        st.markdown("### βš™οΈ Developer Options")
        chunk_size = st.slider("Chunk Size", 200, 1500, 1000, step=50)
        overlap = st.slider("Chunk Overlap", 50, 200, 120, step=10)
        top_k = st.slider("Top K Results", 1, 10, 7)
    else:
        chunk_size, overlap, top_k = 1000, 120, 5

    st.markdown("---")
    st.caption("✨ Built by Shubham Sharma")

    # --- Developer Insights (optional, hidden by default) ---
    if show_dev:
        st.markdown("---")
        with st.expander("🧩 Developer Insights", expanded=False):
            st.markdown("**Retrieved Chunks (Context):**")
            for i, r in enumerate(st.session_state.get("retrieved", []), start=1):
                st.markdown(f"- **Chunk {i}:** {r}")

            toc_data = st.session_state.get("toc", [])
            if toc_data:
                st.markdown("---")
                st.markdown("**Document Sections (TOC):**")
                toc_text = "\n".join([f"{sec}. {title}" for sec, title in toc_data])
                st.text_area("", toc_text, height=120)

            doc_text = st.session_state.get("text", "")
            if doc_text:
                st.markdown("---")
                st.markdown("**Document Preview:**")
                st.text_area("", doc_text[:1000], height=120)
                st.caption(f"{len(st.session_state.get('chunks', []))} chunks processed.")



# ==========================================================
# 🧠 SESSION STATE SAFETY INITIALIZATION
# ==========================================================
for key, val in {
    "user_query_input": "",
    "show_more": False,
    "selected_suggestion": None,
    "query_suggestions_fixed": None,
    "last_doc": None,
    "doc_lang": "en",
    "doc_ready": False,
}.items():
    if key not in st.session_state:
        st.session_state[key] = val

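# Session fix: a widget-keyed session value may only be written before the widget
# is instantiated in a run. The suggestion buttons render above the text_input,
# so writing the key here and forcing a rerun safely pre-fills the query box.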
def set_user_query(q, idx):
    st.session_state["user_query_input"] = q
    st.session_state["selected_suggestion"] = idx
    st.rerun()

# ==========================================================
# 📄 MAIN SECTION
# ==========================================================
st.title("📄 Enterprise Knowledge Assistant")
st.caption("Query SAP documentation and enterprise PDFs — powered by reasoning and retrieval.")

doc_choice = st.radio("Select a document:", ["-- Select --", "Sample PDF", "Upload Custom PDF"], index=0)

# ==========================================================
# 📂 DOCUMENT HANDLING — CLEAN, ACCURATE, AND BYTE-AWARE
# ==========================================================

def _hash_content(file_path):
    """Generate a short SHA256 hash of the file's actual binary content."""
    hasher = hashlib.sha256()
    with open(file_path, "rb") as f:
        while chunk := f.read(8192):
            hasher.update(chunk)
    return hasher.hexdigest()[:12]
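# Illustrative behavior: byte-identical uploads hash to the same 12-char prefix
# regardless of filename, which lets unchanged documents skip re-processing below.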

if doc_choice == "-- Select --":
    st.info("⬅️ Select or upload a document to begin.")
else:
    temp_path = None
    if doc_choice == "Sample PDF":
        temp_path = os.path.join(os.path.dirname(__file__), "sample.pdf")
        st.markdown("✅ **Sample PDF selected.** Preparing document...")
    else:
        uploaded_file = st.file_uploader("Upload a PDF document (max 200MB):", type="pdf", label_visibility="collapsed")
        if uploaded_file:
            temp_path = os.path.join("/tmp", uploaded_file.name)
            with open(temp_path, "wb") as f:
                f.write(uploaded_file.getbuffer())
        else:
            st.stop()

    if temp_path:
        doc_name = os.path.basename(temp_path)
        file_hash = _hash_content(temp_path)
        doc_identifier = f"{doc_name}_{file_hash}"
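        # Name + content hash: the same filename with edited contents triggers
        # reprocessing, while re-selecting an unchanged file does not.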

        if "doc_ready" not in st.session_state or st.session_state.get("last_doc") != doc_identifier:
            status = st.empty()
            status.info("πŸ“€ Upload complete β€” reading document...")

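            # Pipeline: extract text + TOC -> chunk with overlap -> embed
            # (cached per document) -> build a FAISS index for retrieval.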
            text, toc, toc_source = extract_text_from_pdf(temp_path)
            status.info("📑 Parsing and chunking document...")
            chunks = chunk_text(text, chunk_size=chunk_size, overlap=overlap)
            status.info("🧠 Building embeddings and search index...")
            # Key the embedding cache by name + content hash so a changed file with
            # the same name is not served stale embeddings.
            embeddings = cache_embeddings(doc_identifier, chunks, embed_chunks)
            index = build_faiss_index(embeddings)

            registry = st.session_state.get("registry")
            if not registry:
                registry = DocumentRegistry()
                st.session_state["registry"] = registry

            registry.register(temp_path, chunks, embeddings, index)
            

            status.success("✅ Document processed successfully — all set to query your assistant!")

            st.session_state.update({
                "text": text,
                "toc": toc,
                "chunks": chunks,
                "embeddings": embeddings,
                "index": index,
                "doc_ready": True,
                "last_doc": doc_identifier,
                "status_text": "βœ… Document processed successfully β€” all set to query your assistant!"
            })

            query_suggestions = generate_dynamic_suggestions_from_toc(toc, chunks, doc_name)
            st.session_state["query_suggestions_fixed"] = query_suggestions
            st.session_state["user_query_input"] = ""
            st.session_state["selected_suggestion"] = None
            st.session_state["show_more"] = False
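            # Rerun so the suggestion chips and query box render from the freshly
            # initialized state on a clean pass.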
            st.rerun()

        else:
            text = st.session_state["text"]
            toc = st.session_state["toc"]
            chunks = st.session_state["chunks"]
            embeddings = st.session_state["embeddings"]
            index = st.session_state["index"]
            query_suggestions = st.session_state.get("query_suggestions_fixed") or []
            st.info(st.session_state.get("status_text", f"📄 {doc_name} is ready for queries."))

        # --- Ask section ---
        st.markdown("### πŸ’¬ Ask the Assistant")
        if query_suggestions:
            visible = query_suggestions if st.session_state["show_more"] else query_suggestions[:3]
            cols = st.columns(min(3, len(visible)))
            for i, q in enumerate(visible):
                if cols[i % 3].button(f"💬 {q}", key=f"sugg_{i}"):
                    set_user_query(q, i)

            toggle_text = "Show less ▲" if st.session_state["show_more"] else "Show more ▼"
            if st.button(toggle_text, help="Show or hide more suggestions"):
                st.session_state["show_more"] = not st.session_state["show_more"]
                st.rerun()

        user_query = st.text_input("Type your question or click one above:", key="user_query_input")

        if user_query.strip():
            reasoning_mode = mode == "Extended (Document + General)"
            with st.spinner("💭 Generating your answer..."):
                retrieved = retrieve_chunks(user_query, index, chunks, top_k=top_k, embeddings=embeddings)
                answer = generate_answer(user_query, retrieved, reasoning_mode=reasoning_mode)
                st.session_state["retrieved"] = retrieved

            st.markdown("### πŸ€– Assistant’s Answer")
            if not reasoning_mode and not answer.startswith("⚠️"):
                answer = re.sub(r"\*\*(.*?)\*\*", r"\1", answer)
                answer = re.sub(r"(^|\n)-\s*", r"\1<br>• ", answer)
            st.markdown(f"<div class='answer-box'>{answer}</div>", unsafe_allow_html=True)

# ==========================================================
# 🎨 Optional Sidebar Scroll Styling (keeps it clean)
# ==========================================================
st.markdown("""
<style>
section[data-testid="stSidebar"] div.stExpander {
    max-height: 480px;
    overflow-y: auto;
}
</style>
""", unsafe_allow_html=True)