|
|
import os

# Redirect every Hugging Face download (models, datasets, hub modules) into a
# single writable location. This matters on hosts where $HOME is read-only or
# ephemeral (e.g. containerized Streamlit deployments).
CACHE_DIR = "/tmp/hf_cache"

os.makedirs(CACHE_DIR, exist_ok=True)

# HF_HOME is the modern umbrella variable; the three *_CACHE variables are set
# as well for older library versions that still read them (TRANSFORMERS_CACHE
# is deprecated in favour of HF_HOME in recent transformers releases).
# These must be set BEFORE transformers/datasets are imported further down.
os.environ["HF_HOME"] = CACHE_DIR
os.environ["TRANSFORMERS_CACHE"] = CACHE_DIR
os.environ["HF_DATASETS_CACHE"] = CACHE_DIR
os.environ["HF_MODULES_CACHE"] = CACHE_DIR

# NOTE(review): the original message started with a mojibake-corrupted emoji
# that split the line and broke the string literal; replaced with plain text.
print(f"Using Hugging Face cache at {CACHE_DIR}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import streamlit as st |
|
|
from ingestion import extract_text_from_pdf, chunk_text |
|
|
from embeddings import generate_embeddings |
|
|
from vectorstore import build_faiss_index |
|
|
from qa import retrieve_chunks, generate_answer |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Basic page chrome. "wide" layout gives the main column more horizontal room
# for the extracted-text and chunk previews rendered further below.
st.set_page_config(page_title="Enterprise Knowledge Assistant", layout="wide")

# NOTE(review): the leading "π" here (and similar characters in other UI
# strings in this file) looks like a mojibake-corrupted emoji — confirm the
# intended character against the original source.
st.title("π Enterprise Knowledge Assistant")

st.write("Upload a PDF **or try the sample file** to explore this assistant.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Sidebar: branding plus the two tuning knobs the pipeline reads later.
with st.sidebar:
    st.image("src/logo.png", width=150)
    st.header("βοΈ Settings")
    # chunk_size: characters per chunk handed to chunk_text() (200-1000,
    # default 500, step 100). top_k: number of chunks retrieve_chunks()
    # returns for a query (1-5, default 3).
    chunk_size = st.slider("Chunk Size", 200, 1000, 500, step=100)
    top_k = st.slider("Top K Results", 1, 5, 3)

    st.markdown("---")
    st.caption("π¨βπ» Built by Shubham Sharma")
    st.markdown("[π GitHub Repo](https://github.com/shubhamsharma170793-cpu/enterprise-knowledge-assistant)")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Input selection: a real upload, or the bundled sample PDF via the button.
uploaded_file = st.file_uploader("π Upload your PDF", type="pdf")

if st.button("π Try with Sample PDF"):
    # The handle is only ever used as a truthy flag downstream — when
    # use_sample is set, the processing block reads the sample PDF by its
    # path and never touches this object. The original kept the descriptor
    # open for the whole session (fd leak); close it immediately (a closed
    # file object is still truthy).
    uploaded_file = open("app/sample.pdf", "rb")
    uploaded_file.close()
    st.session_state["use_sample"] = True
else:
    # NOTE(review): this else runs on every rerun where the button was not
    # just clicked, so use_sample does not persist across reruns — confirm
    # that is the intended UX.
    st.session_state["use_sample"] = False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if uploaded_file:
    # ---- Resolve the PDF path -------------------------------------------
    # Sample flow: read the bundled file in place. Upload flow: spill the
    # in-memory upload to a scratch file so the path-based extractor can
    # open it.
    if st.session_state.get("use_sample", False):
        temp_path = os.path.join("app", "sample.pdf")
        # NOTE(review): trailing "β" is a mojibake-corrupted emoji that
        # split this string across lines in the original; rejoined here.
        st.info("Using **default sample.pdf** β")
    else:
        # NOTE(review): os.path.join() with a single component was a no-op;
        # the literal relative path is used directly.
        temp_path = "temp.pdf"
        with open(temp_path, "wb") as f:
            f.write(uploaded_file.getbuffer())

    # ---- Extraction ------------------------------------------------------
    text = extract_text_from_pdf(temp_path)
    st.subheader("π Extracted Text (Preview)")
    st.write(text[:1000])  # preview only; the full text can be very large

    # ---- Chunking --------------------------------------------------------
    chunks = chunk_text(text, chunk_size=chunk_size)
    st.write(f"π¦ Total Chunks Created: {len(chunks)}")

    st.subheader("π§© Chunked Text (First 3 Chunks)")
    for i, chunk in enumerate(chunks[:3], start=1):
        st.write(f"**Chunk {i}:** {chunk}")

    # ---- Embedding + indexing -------------------------------------------
    # NOTE(review): embeddings and the FAISS index are rebuilt on every
    # Streamlit rerun (e.g. every keystroke-commit in the query box).
    # Consider st.cache_data / st.cache_resource keyed on the file and
    # chunk_size to avoid recomputation.
    embeddings = generate_embeddings(chunks)
    # NOTE(review): leading mojibake emoji rejoined into one string here.
    st.success(f"β Generated {len(embeddings)} embeddings.")

    index = build_faiss_index(embeddings)

    # ---- Question answering ---------------------------------------------
    user_query = st.text_input("π Ask a question about the document:")

    if user_query:
        retrieved = retrieve_chunks(user_query, index, chunks, top_k=top_k)
        answer = generate_answer(user_query, retrieved)

        st.subheader("π€ Assistantβs Answer")
        st.write(answer)

        # Show the evidence the answer was generated from.
        st.subheader("π Supporting Chunks")
        for i, r in enumerate(retrieved, start=1):
            st.write(f"**Chunk {i}:** {r}")
else:
    # NOTE(review): leading mojibake emoji rejoined into one string here.
    st.info("β¬οΈ Upload a PDF or click 'Try with Sample PDF' to begin.")
|
|
|