# ==========================================================
# streamlit_app.py β€” Stable Layout (English Only)
#
# Streamlit UI for an enterprise PDF question-answering assistant:
# upload/select a PDF, chunk + embed it into a FAISS index, then
# answer user questions with retrieval-augmented generation.
# ==========================================================
import os
import re
import time

import streamlit as st
import torch

# ==========================================================
# βœ… PAGE CONFIGS
# ==========================================================
st.set_page_config(page_title="Enterprise Knowledge Assistant", layout="wide")
print("CUDA available:", torch.cuda.is_available())

# ==========================================================
# βš™οΈ CACHE SETUP
# ==========================================================
# NOTE: these HF_* variables must be set BEFORE the project imports below,
# since those modules pull in Hugging Face libraries that read the cache
# location at import time.
CACHE_DIR = "/tmp/hf_cache"
os.makedirs(CACHE_DIR, exist_ok=True)
os.environ.update({
    "HF_HOME": CACHE_DIR,
    "TRANSFORMERS_CACHE": CACHE_DIR,
    "HF_DATASETS_CACHE": CACHE_DIR,
    "HF_MODULES_CACHE": CACHE_DIR,
})

# ==========================================================
# πŸ“¦ IMPORTS
# ==========================================================
from ingestion import extract_text_from_pdf, chunk_text
from vectorstore import build_faiss_index
from qa import retrieve_chunks, generate_answer, cache_embeddings, embed_chunks, genai_generate


# ==========================================================
# 🧠 SMART SUGGESTION GENERATOR (English Only)
# ==========================================================
def generate_dynamic_suggestions_from_toc(toc, chunks, doc_name="Document"):
    """
    Generate 5-7 short, natural English questions based on TOC and document text.

    Parameters
    ----------
    toc : list[tuple]
        (section, title) pairs extracted from the document's table of contents.
    chunks : list[str]
        Text chunks of the document; the first few are used as a context sample.
    doc_name : str
        Display name of the document, interpolated into the prompt.

    Returns
    -------
    list[str]
        Up to 7 deduplicated questions; generic fallbacks if input is empty
        or generation fails.
    """
    if not toc or not chunks:
        return ["How do I start using this guide?", "What does this document cover?"]

    titles = []
    for sec, raw_title in toc:
        # Strip leading section numbering and trailing dot-leaders + page numbers.
        title = re.sub(r"^\s*[\dA-Za-z.\-]+\s*", "", raw_title)
        title = re.sub(r"\.{2,}\s*\d+$", "", title).strip()
        if 4 < len(title) < 120:
            titles.append(title)

    context_sample = " ".join(chunks[:3])[:4000]

    prompt = f"""
You are a content assistant.
Based on the Table of Contents and the sample document text below, generate 5–7 short, natural user-facing questions.
Each question should be under 18 words, end with a question mark, and sound human.

Document: "{doc_name}"

TABLE OF CONTENTS:
{chr(10).join(['- ' + t for t in titles[:8]])}

SAMPLE TEXT:
{context_sample}

Output: Write each question on a new line. Do not invent facts β€” base questions only on the document.
"""

    try:
        ai_response = genai_generate(prompt)
        lines = [ln.strip() for ln in ai_response.splitlines() if ln.strip()]
        questions = []
        for ln in lines:
            # Drop list markers / numbering the model may prepend.
            q = re.sub(r"^[\-\u2022\*\d\.\)\s]+", "", ln).strip()
            if not q.endswith("?") and len(q.split()) < 18 and re.match(
                r"(?i)^(what|how|why|where|who|when|which|can|does|is|are)\b", q
            ):
                q += "?"
            if 8 <= len(q) <= 140:
                questions.append(q)

        # Dedupe case-insensitively while preserving order.
        final = []
        seen = set()
        for q in questions:
            if q.lower() not in seen:
                seen.add(q.lower())
                final.append(q)

        if not final:
            final = [f"What should I know about {t.rstrip('.')}?" for t in titles[:7]]
        return final[:7]
    except Exception:
        # Best-effort: generation is cosmetic, so fall back silently.
        return ["How do I start using this guide?", "What does this document cover?"]


# ==========================================================
# 🎨 STYLING β€” REVERT TO ORIGINAL
# ==========================================================
# NOTE(review): the CSS payload appears to have been stripped from this
# placeholder block — confirm against version control.
st.markdown(""" """, unsafe_allow_html=True)

# ==========================================================
# 🧭 SIDEBAR
# ==========================================================
with st.sidebar:
    st.markdown("### 🧭 Response Style")
    mode = st.radio(
        "",
        ("Strict (Document-only)", "Extended (Document + General)"),
        index=0,
        help="Strict = answers only from the uploaded document. Extended = may include related general info.",
    )
    st.markdown("---")
    show_dev = st.checkbox("Show advanced settings (for developers)", value=False)
    if show_dev:
        st.markdown("### βš™οΈ Developer Options")
        chunk_size = st.slider("Chunk Size", 200, 1500, 1000, step=50)
        overlap = st.slider("Chunk Overlap", 50, 200, 120, step=10)
        top_k = st.slider("Top K Results", 1, 10, 7)
    else:
        # Defaults used when the developer panel is hidden.
        chunk_size, overlap, top_k = 1000, 120, 5

    st.markdown("---")
    st.caption("✨ Built by Shubham Sharma")

    # 🧩 Developer Insights (moved up here from the main block)
    if show_dev:
        st.markdown("---")
        with st.expander("🧩 Developer Insights", expanded=False):
            st.markdown("**Retrieved Chunks (Context):**")
            retrieved_chunks = st.session_state.get("retrieved", [])
            for i, r in enumerate(retrieved_chunks, start=1):
                st.markdown(f"- **Chunk {i}:** {r}")

            toc_data = st.session_state.get("toc", [])
            if toc_data:
                st.markdown("---")
                st.markdown("**Document Sections (TOC):**")
                toc_text = "\n".join([f"{sec}. {title}" for sec, title in toc_data])
                st.text_area("", toc_text, height=120)

            doc_text = st.session_state.get("text", "")
            if doc_text:
                st.markdown("---")
                st.markdown("**Document Preview:**")
                st.text_area("", doc_text[:1000], height=120)

            st.caption(f"{len(st.session_state.get('chunks', []))} chunks processed.")

# ==========================================================
# 🧠 SESSION STATE
# ==========================================================
for key, val in {
    "user_query_input": "",
    "show_more": False,
    "selected_suggestion": None,
    "query_suggestions_fixed": None,
    "last_doc": None,
    "doc_lang": "en",  # πŸ†• optional: store document language
}.items():
    if key not in st.session_state:
        st.session_state[key] = val


def set_user_query(q, idx):
    """Record a clicked suggestion in session state and rerun the script."""
    st.session_state["user_query_input"] = q
    st.session_state["selected_suggestion"] = idx
    # FIX: st.experimental_rerun() is deprecated/removed in current Streamlit;
    # use st.rerun() consistently with the rest of this file.
    st.rerun()


# ==========================================================
# πŸ“„ MAIN SECTION
# ==========================================================
st.title("πŸ“„ Enterprise Knowledge Assistant")
st.caption("Query SAP documentation and enterprise PDFs β€” powered by reasoning and retrieval.")

doc_choice = st.radio("Select a document:", ["-- Select --", "Sample PDF", "Upload Custom PDF"], index=0)

# ==========================================================
# πŸ“‚ DOCUMENT HANDLING β€” CLEAN + ACCURATE UI FLOW (final polished)
# ==========================================================
if doc_choice == "-- Select --":
    st.info("⬅️ Select or upload a document to begin.")
else:
    temp_path = None

    # --- File selection ---
    if doc_choice == "Sample PDF":
        temp_path = os.path.join(os.path.dirname(__file__), "sample.pdf")
        st.markdown("βœ… **Sample PDF selected.** Preparing document...")
    else:
        uploaded_file = st.file_uploader(
            "Upload a PDF document (max 200MB):", type="pdf", label_visibility="collapsed"
        )
        if uploaded_file:
            temp_path = os.path.join("/tmp", uploaded_file.name)
            with open(temp_path, "wb") as f:
                f.write(uploaded_file.getbuffer())
        else:
            # Nothing uploaded yet β€” halt this run until the user acts.
            st.stop()

    # --- Real processing begins here ---
    if temp_path:
        doc_name = os.path.basename(temp_path)

        # Process only once per document (re-process when the doc changes).
        if "doc_ready" not in st.session_state or st.session_state.get("last_doc") != doc_name:
            status = st.empty()
            status.info("πŸ“€ Upload complete β€” reading document...")
            text, toc, toc_source = extract_text_from_pdf(temp_path)

            status.info("πŸ“‘ Parsing and chunking document...")
            chunks = chunk_text(text, chunk_size=chunk_size, overlap=overlap)

            status.info("🧠 Building embeddings and search index...")
            embeddings = cache_embeddings(doc_name, chunks, embed_chunks)
            index = build_faiss_index(embeddings)

            # βœ… Smooth transition for ready state
            status.success("βœ… Document processed successfully.")
            time.sleep(1.2)
            status.empty()

            # βœ… Simplified final ready message for reruns
            st.session_state.update({
                "text": text,
                "toc": toc,
                "chunks": chunks,
                "embeddings": embeddings,
                "index": index,
                "doc_ready": True,
                "last_doc": doc_name,
                "status_text": f"πŸ“„ {doc_name} ready β€” your AI assistant is standing by!",
            })

            # Build suggestions once
            query_suggestions = generate_dynamic_suggestions_from_toc(toc, chunks, doc_name)
            st.session_state["query_suggestions_fixed"] = query_suggestions
            st.session_state["user_query_input"] = ""
            st.session_state["selected_suggestion"] = None
            st.session_state["show_more"] = False
            st.rerun()
        else:
            # Reuse cached content and show final ready message with fade-in
            text = st.session_state["text"]
            toc = st.session_state["toc"]
            chunks = st.session_state["chunks"]
            embeddings = st.session_state["embeddings"]
            index = st.session_state["index"]
            query_suggestions = st.session_state.get("query_suggestions_fixed", [])

            # ✨ Fading "ready" message for better UX
            st.markdown(f"""
βœ… {st.session_state.get('status_text', f"{doc_name} ready for queries.")}
""", unsafe_allow_html=True)

        # --- Ask section ---
        st.markdown("### πŸ’¬ Ask the Assistant")

        if query_suggestions:
            visible = query_suggestions if st.session_state["show_more"] else query_suggestions[:3]
            cols = st.columns(min(3, len(visible)))
            for i, q in enumerate(visible):
                if cols[i % 3].button(f"πŸ’¬ {q}", key=f"sugg_{i}"):
                    set_user_query(q, i)

            toggle_text = "Show less β–²" if st.session_state["show_more"] else "Show more β–Ό"
            if st.button(toggle_text, help="Show or hide more suggestions"):
                st.session_state["show_more"] = not st.session_state["show_more"]
                st.rerun()

        user_query = st.text_input("Type your question or click one above:", key="user_query_input")

        if user_query.strip():
            reasoning_mode = mode == "Extended (Document + General)"
            with st.spinner("πŸ’­ Generating your answer..."):
                retrieved = retrieve_chunks(user_query, index, chunks, top_k=top_k, embeddings=embeddings)
                answer = generate_answer(user_query, retrieved, reasoning_mode=reasoning_mode)
                st.session_state["retrieved"] = retrieved

            st.markdown("### πŸ€– Assistant’s Answer")
            if not reasoning_mode and not answer.startswith("⚠️"):
                # Strip markdown bold markers and turn "- " bullets into "β€’ ".
                answer = re.sub(r"\*\*(.*?)\*\*", r"\1", answer)
                answer = re.sub(r"(^|\n)-\s*", r"\1β€’ ", answer)
            st.markdown(f"""
{answer}
""", unsafe_allow_html=True)

# ==========================================================
# 🎨 Optional Sidebar Scroll Styling (keeps it clean)
# ==========================================================
# NOTE(review): CSS payload also appears stripped here — confirm against VCS.
st.markdown(""" """, unsafe_allow_html=True)