import os
import shutil
import streamlit as st
import torch
print("CUDA available:", torch.cuda.is_available())
print("Device count:", torch.cuda.device_count())
if torch.cuda.is_available():
    print("GPU name:", torch.cuda.get_device_name(0))
else:
    print("Running on CPU")
# ==========================================================
# ✅ Page Configuration (must be first Streamlit command)
# ==========================================================
st.set_page_config(
    page_title="Enterprise Knowledge Assistant",
    layout="wide"
)
# ==========================================================
# 🧹 Cache Management (prevents Hugging Face 50GB overflow)
# ==========================================================
def clean_cache(max_size_gb: float = 2.0):
    """
    Delete cache folders larger than max_size_gb (the torch cache is always
    removed, whatever its size). /tmp/hf_cache is deliberately excluded from
    the list below because the model uses it for offloading.
    """
    folders = [
        "/root/.cache/huggingface",
        "/root/.cache/transformers",
        "/root/.cache/torch",
        # "/tmp/hf_cache",  # 🚫 DO NOT DELETE: used by Mistral for offloading
    ]
    total_deleted = 0.0
    for folder in folders:
        if os.path.exists(folder):
            # estimate folder size by walking the tree
            size_gb = sum(
                os.path.getsize(os.path.join(dp, f))
                for dp, _, files in os.walk(folder)
                for f in files
            ) / (1024**3)
            # only delete if large (torch cache is removed unconditionally)
            if size_gb > max_size_gb or "torch" in folder:
                shutil.rmtree(folder, ignore_errors=True)
                total_deleted += size_gb
                print(f"🗑️ Deleted {folder} ({size_gb:.2f} GB)")
            else:
                print(f"✅ Preserved {folder} ({size_gb:.2f} GB)")
    os.makedirs("/tmp/hf_cache", exist_ok=True)
    print(f"🧹 Cache cleanup done. ~{total_deleted:.2f} GB removed.")
def check_disk_usage():
    """Show disk usage info in sidebar."""
    st.sidebar.markdown("### 💾 Disk Usage (Debug)")
    try:
        usage = os.popen("du -sh /root/.cache /tmp 2>/dev/null").read()
        st.sidebar.text(usage if usage else "No cache directories found.")
    except Exception as e:
        st.sidebar.text(f"⚠️ Disk usage check failed: {e}")
# Run cleanup & diagnostics
clean_cache()
check_disk_usage()
# ==========================================================
# ⚙️ Hugging Face Cache Configuration (/tmp for writable path)
# ==========================================================
CACHE_DIR = "/tmp/hf_cache"
os.makedirs(CACHE_DIR, exist_ok=True)
os.environ.update({
    "HF_HOME": CACHE_DIR,
    "TRANSFORMERS_CACHE": CACHE_DIR,
    "HF_DATASETS_CACHE": CACHE_DIR,
    "HF_MODULES_CACHE": CACHE_DIR,
})
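# These variables must be set before transformers/datasets are imported
# (directly or via the project modules below), since the libraries read them
# at import time. Recent transformers releases prefer HF_HOME and deprecate
# TRANSFORMERS_CACHE; setting both keeps older versions working too.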
# ==========================================================
# 📦 Imports AFTER environment setup
# ==========================================================
from ingestion import extract_text_from_pdf, chunk_text
from embeddings import generate_embeddings
from vectorstore import build_faiss_index
from qa import retrieve_chunks, generate_answer
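# Pipeline wiring, as used below: extract_text_from_pdf() pulls raw text out
# of the PDF, chunk_text() splits it into character-sized chunks,
# generate_embeddings() encodes each chunk as a vector, build_faiss_index()
# stores those vectors for similarity search, and retrieve_chunks() +
# generate_answer() implement the retrieve-then-generate (RAG) question loop.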
# ==========================================================
# 📁 Paths
# ==========================================================
BASE_DIR = os.path.dirname(__file__) # /app/src
LOGO_PATH = os.path.join(BASE_DIR, "logo.png")
SAMPLE_PATH = os.path.join(BASE_DIR, "sample.pdf")
# ==========================================================
# 🖥️ UI Header
# ==========================================================
st.title("📚 Enterprise Knowledge Assistant")
st.caption("Upload a PDF or use the sample file to explore intelligent document Q&A.")
# ==========================================================
# 🔧 Sidebar (Document Library + Settings + Diagnostics)
# ==========================================================
with st.sidebar:
    # 🖼️ App Logo (if available)
    if os.path.exists(LOGO_PATH):
        st.image(LOGO_PATH, width=150)

    # 🧠 Reasoning Mode Toggle (Persistent)
    if "reasoning_mode" not in st.session_state:
        st.session_state.reasoning_mode = False  # Default OFF
    st.session_state.reasoning_mode = st.toggle(
        "🧠 Enable Reasoning Mode",
        value=st.session_state.reasoning_mode,
        help=(
            "When ON, the assistant can use its world knowledge and reasoning ability "
            "to generate richer, more explanatory answers.\n\n"
            "When OFF, it sticks strictly to the document text for factual accuracy."
        )
    )
    st.markdown("---")

    # 📚 Document Library
    st.header("📚 Document Library")
    doc_choice = st.radio(
        "Choose a document:",
        ["-- Select --", "Sample PDF", "Upload Custom PDF"],
        index=0
    )
    st.markdown("---")

    # ⚙️ Settings
    st.header("⚙️ Settings")
    chunk_size = st.slider("Chunk Size (characters)", 200, 800, 500, step=50)
    overlap = st.slider("Chunk Overlap (characters)", 50, 200, 120, step=10)
    top_k = st.slider("Top K Results (retrieved chunks)", 1, 10, 5)
    st.markdown("---")

    # 👨‍💻 Branding
    st.caption("👨‍💻 Built by Shubham Sharma")
    st.markdown("[🔗 GitHub Repo](https://github.com/shubhamsharma170793-cpu/enterprise-knowledge-assistant)")
# ==========================================================
# 🧾 Document Handling
# ==========================================================
text, chunks, index = None, None, None
if doc_choice == "-- Select --":
    st.info("⬅️ Please choose **Sample PDF** or **Upload Custom PDF** from the sidebar.")

elif doc_choice == "Sample PDF":
    temp_path = SAMPLE_PATH
    st.success("📄 Using built-in Sample PDF")
    with st.spinner("🔍 Extracting and processing document..."):
        text = extract_text_from_pdf(temp_path)
        chunks = chunk_text(text, chunk_size=chunk_size)
        embeddings = generate_embeddings(chunks)
        index = build_faiss_index(embeddings)

elif doc_choice == "Upload Custom PDF":
    uploaded_file = st.file_uploader("📎 Upload your PDF", type="pdf")
    if uploaded_file:
        temp_path = os.path.join("/tmp", uploaded_file.name)
        with open(temp_path, "wb") as f:
            f.write(uploaded_file.getbuffer())
        st.success(f"✅ File '{uploaded_file.name}' uploaded successfully")
        with st.spinner("⚙️ Extracting and processing your document..."):
            text = extract_text_from_pdf(temp_path)
            chunks = chunk_text(text, chunk_size=chunk_size)
            embeddings = generate_embeddings(chunks)
            index = build_faiss_index(embeddings)
        st.success("🎉 Document processed successfully!")
# ==========================================================
# 📄 Document Preview
# ==========================================================
if chunks:
    st.subheader("📄 Document Preview")
    st.text_area("Extracted text (first 1000 chars)", text[:1000], height=200)
    avg_len = int(sum(len(c) for c in chunks) / len(chunks))
    st.caption(f"📦 {len(chunks)} chunks created | Avg chunk length: {avg_len} chars")
# ==========================================================
# 🤖 Query Section
# ==========================================================
if index is not None and chunks:
    st.markdown("---")
    st.subheader("🤖 Ask a Question")
    user_query = st.text_input("🔍 Your question about the document:")
    if user_query:
        # Show which mode is active
        mode_label = (
            "🧠 Reasoning Mode (expanded thinking)"
            if st.session_state.reasoning_mode
            else "📄 Strict Document Mode (factual only)"
        )
        st.caption(f"Mode: {mode_label}")

        # Generate the answer
        with st.spinner("🧠 Thinking... retrieving context and generating answer..."):
            retrieved = retrieve_chunks(user_query, index, chunks, top_k=top_k)
            answer = generate_answer(user_query, retrieved, reasoning_mode=st.session_state.reasoning_mode)
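        # retrieve_chunks() runs a top-k similarity search against the FAISS
        # index for the query; generate_answer() then builds the model prompt
        # from those chunks, respecting the reasoning-mode toggle.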
        # ✅ Display Answer
        st.markdown("### ✅ Assistant's Answer")
        st.markdown(
            f"<div style='background-color:#0E1117;padding:12px;border-radius:10px;color:white;'>{answer}</div>",
            unsafe_allow_html=True
        )

        # 📑 Supporting Chunks
        with st.expander("📑 Supporting Chunks (Context Used)"):
            for i, r in enumerate(retrieved, start=1):
                st.markdown(
                    f"""
                    <div style='background-color:#111827;padding:10px;border-radius:8px;margin-bottom:6px;'>
                    <b>Chunk {i}:</b><br>{r}
                    </div>
                    """,
                    unsafe_allow_html=True,
                )
else:
    st.info("📥 Upload or select a document to start exploring.")