# QueryDocs AI — Streamlit RAG app.
# This section: imports, timezone helper, page config, global CSS,
# session-state defaults, cached model loaders, PDF ingestion, the RAG
# answer function, and the sidebar UI.
#
# NOTE(review): this file was recovered from a whitespace-mangled paste; the
# HTML markup originally embedded in the st.markdown() strings appears to have
# been stripped by extraction. Only the visible text is preserved below —
# restore the original markup/CSS from version control if available.

import streamlit as st
from PyPDF2 import PdfReader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings, HuggingFacePipeline
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import PromptTemplate
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch
import time
import base64
import html as html_module
from datetime import datetime, timezone, timedelta

# ─── TIMEZONE (IST = UTC+5:30) ────────────────────────────────────────────────
IST = timezone(timedelta(hours=5, minutes=30))


def get_ist_time():
    """Return the current time in IST as an ``HH:MM`` string.

    Used for chat-message timestamps so they reflect the user's timezone
    rather than the server's.
    """
    return datetime.now(IST).strftime("%H:%M")


st.set_page_config(
    page_title="QueryDocs AI",
    page_icon="📚",
    layout="wide",
    initial_sidebar_state="expanded"
)


# ─── HELPERS ──────────────────────────────────────────────────────────────────
def img_to_base64(path):
    """Read an image file and return its base64-encoded contents.

    Returns None when the file cannot be read, letting the caller fall back
    to a text-only avatar.
    """
    try:
        with open(path, "rb") as f:
            return base64.b64encode(f.read()).decode()
    # FIX: was a bare `except:`, which also swallows KeyboardInterrupt and
    # SystemExit. Only file-access failures should trigger the fallback.
    except OSError:
        return None


# ─── GLOBAL CSS ───────────────────────────────────────────────────────────────
# NOTE(review): the <style> payload was stripped during extraction; the call
# shape is preserved so the app still runs.
st.markdown("""
""", unsafe_allow_html=True)

# ─── SESSION STATE ────────────────────────────────────────────────────────────
# One-time defaults; st.session_state survives Streamlit reruns within a
# browser session, so each key is only seeded when absent.
_SESSION_DEFAULTS = {
    "messages": [],        # chat history: dicts with role/content/time/...
    "vectorstore": None,   # FAISS index built from the current PDF
    "pdf_name": None,      # name of the currently indexed PDF
    "pdf_pages": 0,
    "pdf_chunks": 0,
    "q_count": 0,          # questions asked against the current PDF
    "last_q": "",          # dedup guard against double-submit on rerun
    "input_key": 0,        # bumped to swap in a fresh (empty) text_input
    "pending_input": "",   # question text synced from the input widget
}
for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default


# ─── MODEL LOADERS ────────────────────────────────────────────────────────────
@st.cache_resource(show_spinner=False)
def load_embeddings():
    """Load the sentence-transformer embedding model (cached per process)."""
    return HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")


@st.cache_resource(show_spinner=False)
def load_llm():
    """Load TinyLlama-1.1B-chat wrapped in a LangChain HuggingFacePipeline.

    Uses fp16 on GPU and fp32 on CPU; cached for the process lifetime via
    st.cache_resource so the model is only loaded once.
    """
    model_id = "TinyLlama/TinyLlama-1.1B-chat-v1.0"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        low_cpu_mem_usage=True,
        device_map="cuda" if torch.cuda.is_available() else None
    )
    if not torch.cuda.is_available():
        # device_map=None leaves placement to transformers; pin to CPU
        # explicitly so generation never tries a missing accelerator.
        model = model.to("cpu")
    pipe = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=512,
        temperature=0.3,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,  # silence missing-pad warning
        repetition_penalty=1.1
    )
    return HuggingFacePipeline(pipeline=pipe)


# ─── PDF PROCESSOR ────────────────────────────────────────────────────────────
def process_pdf(uploaded_file):
    """Extract text from a PDF, chunk it, and build a FAISS vector store.

    Returns a tuple ``(vectorstore, page_count, chunk_count)``.
    Raises ValueError when no extractable text is found (e.g. a scanned,
    image-only PDF).
    """
    reader = PdfReader(uploaded_file)
    # FIX: join pages with a newline — the old `raw_text += text` loop fused
    # the last word of one page with the first word of the next (corrupting
    # chunk boundaries) and built the string quadratically.
    page_texts = [t for t in (page.extract_text() for page in reader.pages) if t]
    raw_text = "\n".join(page_texts)
    if not raw_text.strip():
        raise ValueError("No readable text found. PDF may be scanned/image-based.")
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    chunks = splitter.split_text(raw_text)
    embeddings = load_embeddings()
    vectorstore = FAISS.from_texts(chunks, embeddings)
    return vectorstore, len(reader.pages), len(chunks)


# ─── ANSWER FUNCTION ──────────────────────────────────────────────────────────
def get_answer(question, vectorstore):
    """Answer `question` via RAG: retrieve top-4 chunks, prompt TinyLlama.

    Returns ``(answer_text, source_previews)`` where source_previews are the
    first 120 characters of each retrieved chunk.
    """
    retriever = vectorstore.as_retriever(search_kwargs={"k": 4})
    relevant_docs = retriever.invoke(question)
    context = "\n\n".join(f"---\n{doc.page_content}" for doc in relevant_docs)
    sources = [doc.page_content[:120] + "..." for doc in relevant_docs]
    # TinyLlama's chat template uses <|system|>/<|user|>/<|assistant|> tags.
    prompt_template = PromptTemplate(
        input_variables=["context", "question"],
        template="""<|system|>
You are QueryDocs AI, an intelligent document assistant. Use ONLY the context provided to answer the question clearly and accurately. If the answer is not in the context, say so honestly.
<|user|>
CONTEXT:
{context}

QUESTION: {question}
<|assistant|>
"""
    )
    llm = load_llm()
    chain = prompt_template | llm
    result = chain.invoke({"context": context, "question": question})
    # text-generation pipelines may echo the prompt; keep only the text after
    # the final assistant tag when present.
    if "<|assistant|>" in result:
        answer = result.split("<|assistant|>")[-1].strip()
    else:
        answer = result.strip()
    return answer, sources


# ─── SIDEBAR ──────────────────────────────────────────────────────────────────
with st.sidebar:
    # Profile card. NOTE(review): the surrounding HTML was lost in the
    # mangled paste; only the visible text content survives here.
    img_b64 = img_to_base64("assets/NANII.png")
    avatar = (f'Sriram' if img_b64 else '')
    st.markdown(f"""
{avatar}
SRIRAM SAI
AI & ML ENGINEER
""", unsafe_allow_html=True)

    st.markdown('📄 UPLOAD DOCUMENT', unsafe_allow_html=True)
    uploaded_file = st.file_uploader("Upload PDF", type=["pdf"], label_visibility="collapsed")
    if uploaded_file:
        # Only (re)index when a *different* file is uploaded — reruns with
        # the same upload skip the expensive embedding pass.
        if st.session_state.pdf_name != uploaded_file.name:
            with st.spinner("🔍 Processing PDF..."):
                try:
                    vs, pages, chunks = process_pdf(uploaded_file)
                    st.session_state.vectorstore = vs
                    st.session_state.pdf_name = uploaded_file.name
                    st.session_state.pdf_pages = pages
                    st.session_state.pdf_chunks = chunks
                    # New document: reset the conversation.
                    st.session_state.messages = []
                    st.session_state.q_count = 0
                    st.success("✅ PDF ready!")
                except Exception as e:
                    st.error(f"❌ {str(e)}")

    st.markdown("---")

    if st.session_state.pdf_name:
        # Stats card: name (truncated to 22 chars), pages, chunks, questions.
        st.markdown(f"""
📊 DOCUMENT STATS
📄 {st.session_state.pdf_name[:22]}{'...' if len(st.session_state.pdf_name) > 22 else ''}
{st.session_state.pdf_pages}
PAGES
{st.session_state.pdf_chunks}
CHUNKS
{st.session_state.q_count}
ASKED
""", unsafe_allow_html=True)
        st.markdown("---")

    if st.button("🗑️ CLEAR CHAT", use_container_width=True):
        st.session_state.messages = []
        st.session_state.q_count = 0
        st.rerun()

    if st.button("📄 LOAD NEW PDF", use_container_width=True):
        # Drop the index and all per-document state.
        st.session_state.vectorstore = None
        st.session_state.pdf_name = None
        st.session_state.pdf_pages = 0
        st.session_state.pdf_chunks = 0
        st.session_state.messages = []
        st.session_state.q_count = 0
        st.rerun()
# ─── MAIN AREA ────────────────────────────────────────────────────────────────
# NOTE(review): the HTML markup inside the st.markdown() strings below was
# lost when this file was whitespace-mangled; only the visible text remains.

# Page header.
st.markdown("""
QUERYDOCS AI 📚
INTELLIGENT DOCUMENT Q&A · RAG PIPELINE
""", unsafe_allow_html=True)

# Active-document banner — shown only once a PDF has been indexed.
if st.session_state.pdf_name:
    safe_pdf_name = html_module.escape(st.session_state.pdf_name)  # FIX: XSS
    st.markdown(f"""
📄
{safe_pdf_name}
{st.session_state.pdf_pages} pages · {st.session_state.pdf_chunks} chunks · ready to query
● ACTIVE
""", unsafe_allow_html=True)

if not st.session_state.vectorstore:
    # Empty state: no document indexed yet — prompt the user to upload.
    st.markdown("""
📚
WELCOME TO QUERYDOCS AI
Upload any PDF document from the sidebar and start asking questions.
Powered by RAG pipeline — context-aware answers.

📋 Legal documents 📊 Research papers 📖 Study material 📝 Reports
""", unsafe_allow_html=True)
else:
    # ── Chat history ──
    if st.session_state.messages:
        for msg in st.session_state.messages:
            ts = msg.get("time", "")
            if msg["role"] == "user":
                # User bubble. Content is escaped because the surrounding
                # markdown is rendered with unsafe_allow_html=True.
                safe_user = html_module.escape(msg["content"])  # FIX: XSS
                st.markdown(f"""
{safe_user}
YOU · {ts}
""", unsafe_allow_html=True)
            else:
                # Assistant bubble, rendered narrower than full width via
                # a 4:1 column split (col_space is an intentional spacer).
                col_ai, col_space = st.columns([4, 1])
                with col_ai:
                    st.markdown(f"""
// QUERYDOCS RESPONSE
""", unsafe_allow_html=True)
                    st.markdown(f'', unsafe_allow_html=True)
                    # Model output rendered as plain Markdown (deliberately
                    # NOT escaped — it is trusted app output, not user input).
                    st.markdown(msg["content"])
                    st.markdown('', unsafe_allow_html=True)
                    # Source chips — one chip per retrieved chunk; only the
                    # count is shown, not the chunk text itself.
                    if msg.get("sources"):
                        chips = "".join(f'📎 Chunk {i+1}' for i, _ in enumerate(msg["sources"]))
                        st.markdown(f"""
// SOURCE CHUNKS USED
{chips}
""", unsafe_allow_html=True)
                    # Bubble footer: timestamp and generation wall time.
                    st.markdown(f"""
📚 QUERYDOCS AI · {ts} · {msg.get("elapsed","?")}s
""", unsafe_allow_html=True)
    else:
        # Document loaded but no questions asked yet — suggestion prompts.
        st.markdown("""
💬
DOCUMENT LOADED — START ASKING
Ask anything about the uploaded document.

💡 Summarize this document 💡 What are the key findings? 💡 List all important dates
""", unsafe_allow_html=True)

    # Placeholder for the transient "typing" indicator shown during generation.
    typing_slot = st.empty()

    # ── Input row — merged input+button ──
    st.markdown('', unsafe_allow_html=True)
    # Keyed widget trick: bumping input_key swaps in a brand-new (empty)
    # text_input on the next rerun, which is how the box gets cleared.
    _current_key = f"question_input_{st.session_state.input_key}"

    def _sync_pending():
        # on_change callback: mirror the widget value into pending_input so
        # the question survives the widget being replaced on rerun.
        # NOTE(review): closes over _current_key late — assumes the callback
        # fires before the next script run rebinds it; confirm under rapid
        # successive submits.
        st.session_state.pending_input = st.session_state.get(_current_key, "")

    col_q, col_btn = st.columns([6, 0.7])
    with col_q:
        question = st.text_input(
            "",
            placeholder="ask a question about your document...",
            label_visibility="collapsed",
            key=_current_key,
            on_change=_sync_pending
        )
        if question:
            st.session_state.pending_input = question
    with col_btn:
        # NOTE(review): ask_btn's value is never read — submission is driven
        # by trigger_q below; the button's click merely causes a rerun.
        ask_btn = st.button("▶ ASK", use_container_width=True)

    # The question may arrive via the callback-synced state or the widget's
    # current value, whichever is non-empty.
    trigger_q = (st.session_state.get("pending_input", "") or question).strip()

    # ── Generate answer ──
    # last_q guards against re-answering the same question on the next rerun.
    if trigger_q and trigger_q != st.session_state.last_q:
        # FIX: set last_q and clear input IMMEDIATELY to prevent double-trigger
        st.session_state.last_q = trigger_q
        st.session_state.input_key += 1
        st.session_state.pending_input = ""
        ts = get_ist_time()  # FIX: IST time instead of UTC server time
        st.session_state.messages.append({
            "role": "user",
            "content": trigger_q,
            "time": ts
        })
        st.session_state.q_count += 1
        # Show the typing indicator while the LLM runs (payload stripped).
        typing_slot.markdown("""
""", unsafe_allow_html=True)
        try:
            start = time.time()
            answer, sources = get_answer(trigger_q, st.session_state.vectorstore)
            elapsed = round(time.time() - start, 1)
            st.session_state.messages.append({
                "role": "assistant",
                "content": answer,
                "sources": sources,
                "time": get_ist_time(),  # FIX: IST time
                "elapsed": elapsed
            })
        except Exception as e:
            # Surface generation failures as an assistant message rather
            # than crashing the app mid-rerun.
            st.session_state.messages.append({
                "role": "assistant",
                "content": f"⚠️ Error generating answer: {str(e)}",
                "sources": [],
                "time": get_ist_time(),
                "elapsed": 0
            })
        typing_slot.empty()
        st.rerun()