"""Streamlit RAG demo: embed documents with sentence-transformers, index them
with FAISS, and answer questions with Groq chat completions over the retrieved
context."""

import os
import pickle
import tempfile
from typing import Any, Dict, List

import docx
import faiss
import numpy as np
import PyPDF2
import streamlit as st
from groq import Groq
from sentence_transformers import SentenceTransformer

DEFAULT_EMBED_MODEL = "all-MiniLM-L6-v2"
DEFAULT_CHUNK_SIZE = 200      # chunk size, in words
DEFAULT_CHUNK_OVERLAP = 50    # overlap between consecutive chunks, in words
DEFAULT_TOP_K = 5             # number of chunks retrieved per question
INDEX_PATH = "faiss_index.bin"
METADATA_PATH = "metadata.pkl"


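# st.cache_resource keeps a single SentenceTransformer instance alive across
# Streamlit reruns, so the embedding model is loaded only once per process.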
@st.cache_resource
def load_embedding_model(model_name: str = DEFAULT_EMBED_MODEL):
    return SentenceTransformer(model_name)


def save_uploaded_file(uploaded_file) -> str:
    tmpdir = tempfile.gettempdir()
    safe_name = os.path.basename(uploaded_file.name)
    temp_path = os.path.join(tmpdir, safe_name)
    with open(temp_path, "wb") as f:
        f.write(uploaded_file.getbuffer())
    return temp_path


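# Text extraction helpers: each returns the extracted text, or an empty string
# on any parsing error so the ingestion loop can simply skip the file.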
def extract_text_from_pdf(path: str) -> str:
    text_pages = []
    try:
        reader = PyPDF2.PdfReader(path)
        for page in reader.pages:
            page_text = page.extract_text() or ""
            text_pages.append(page_text)
    except Exception:
        return ""
    return "\n\n".join(text_pages)


def extract_text_from_docx(path: str) -> str:
    try:
        doc = docx.Document(path)
        paragraphs = [p.text for p in doc.paragraphs]
        return "\n\n".join(paragraphs)
    except Exception:
        return ""


def extract_text_from_txt(path: str) -> str:
    try:
        with open(path, "r", encoding="utf-8", errors="ignore") as f:
            return f.read()
    except Exception:
        return ""


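# Word-based sliding-window chunking: each chunk holds up to `chunk_size` words
# and consecutive chunks share `overlap` words, so text cut at a chunk boundary
# still appears intact in at least one chunk.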
def chunk_text(text: str, chunk_size: int = DEFAULT_CHUNK_SIZE, overlap: int = DEFAULT_CHUNK_OVERLAP) -> List[str]:
    # Clamp the overlap so the window always advances; an overlap >= chunk_size
    # would otherwise make this loop spin forever.
    overlap = min(overlap, max(chunk_size - 1, 0))
    words = text.split()
    chunks: List[str] = []
    start = 0
    n = len(words)
    while start < n:
        end = min(start + chunk_size, n)
        chunk = " ".join(words[start:end])
        chunks.append(chunk)
        if end == n:
            break
        start = max(0, end - overlap)
    return chunks


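# Embeddings are L2-normalized (see normalize_embeddings) before being added to
# an inner-product index, so the scores FAISS returns are cosine similarities.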
def build_faiss_index(embeddings: np.ndarray) -> faiss.IndexFlat:
    embeddings = np.ascontiguousarray(embeddings.astype('float32'))
    d = embeddings.shape[1]
    index = faiss.IndexFlatIP(d)
    index.add(embeddings)
    return index


def normalize_embeddings(vecs: np.ndarray) -> np.ndarray:
    vecs = np.asarray(vecs, dtype=np.float32)
    norms = np.linalg.norm(vecs, axis=1, keepdims=True)
    norms[norms == 0] = 1.0
    return vecs / norms


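# Persistence helpers: the FAISS index is written with faiss.write_index and the
# chunk metadata is pickled next to it, so a built index survives app restarts.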
def save_index_and_metadata(index: faiss.Index, metadata: List[Dict[str, Any]], index_path: str = INDEX_PATH, meta_path: str = METADATA_PATH) -> bool:
    try:
        faiss.write_index(index, index_path)
        with open(meta_path, "wb") as f:
            pickle.dump(metadata, f)
        return True
    except Exception:
        return False


def load_index_and_metadata(index_path: str = INDEX_PATH, meta_path: str = METADATA_PATH):
    try:
        if os.path.exists(index_path) and os.path.exists(meta_path):
            index = faiss.read_index(index_path)
            with open(meta_path, "rb") as f:
                metadata = pickle.load(f)
            return index, metadata
    except Exception:
        return None, None
    return None, None


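# The Groq API key is read from the environment (or Hugging Face Space secrets);
# it is never entered through the Streamlit UI.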
def get_groq_client_from_env() -> Groq | None:
    key = os.environ.get("GROQ_API_KEY") or os.environ.get("HF_GROQ_API_KEY") or os.environ.get("HF_API_TOKEN")
    if not key:
        return None
    return Groq(api_key=key)


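# Thin wrapper around Groq chat completions; returns an empty string on any API
# error so the caller can show a friendly message instead of a stack trace.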
def groq_chat_answer(client: Groq, messages: List[Dict[str, str]], model: str = "llama-3.3-70b-versatile", temperature: float = 0.0, max_tokens: int = 512) -> str:
    try:
        chat_completion = client.chat.completions.create(
            messages=messages,
            model=model,
            temperature=temperature,
            max_tokens=max_tokens,
        )
        return getattr(chat_completion.choices[0].message, 'content', '')
    except Exception:
        return ""


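# Streamlit entry point: sidebar settings, document ingestion (section 1), and
# retrieval-augmented question answering (section 2).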
def main():
    st.set_page_config(page_title="RAG with Groq + Sentence-Transformers", layout="wide")
    st.title("RAG (Retrieval Augmented Generation) — Groq + sentence-transformers + FAISS")

    st.sidebar.header("Index / Retrieval settings")
    chunk_size = int(st.sidebar.number_input("Chunk size (words)", min_value=50, max_value=2000, value=DEFAULT_CHUNK_SIZE, step=50))
    chunk_overlap = int(st.sidebar.number_input("Chunk overlap (words)", min_value=0, max_value=500, value=DEFAULT_CHUNK_OVERLAP, step=10))
    top_k = int(st.sidebar.number_input("Top-k results to retrieve", min_value=1, max_value=50, value=DEFAULT_TOP_K))

    st.sidebar.markdown("---")
    st.sidebar.write("Groq API key is loaded from Hugging Face secrets or environment variables. Do not paste keys in the UI.")

    with st.spinner("Loading embedding model..."):
        embed_model = load_embedding_model()

    index, metadata = load_index_and_metadata()
    if index is not None and metadata is not None:
        # Make a previously saved index available to the question flow below,
        # which reads the index and metadata from session_state.
        st.session_state.setdefault("index", index)
        st.session_state.setdefault("metadata", metadata)
    if index is None and "index" in st.session_state:
        index = st.session_state["index"]
        metadata = st.session_state.get("metadata")

    if index is None:
        st.info("No existing FAISS index found. Upload documents and click 'Ingest documents' to build an index.")
    else:
        try:
            nt = index.ntotal
        except Exception:
            nt = 0
        st.success(f"Loaded FAISS index with {nt} vectors.")

    st.header("1) Upload documents")
    uploaded_files = st.file_uploader("Upload PDF / DOCX / TXT files", type=["pdf", "docx", "txt"], accept_multiple_files=True)

    if uploaded_files:
        st.session_state["uploaded_list"] = [f.name for f in uploaded_files]
        st.write("Files ready to ingest:", st.session_state["uploaded_list"])

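    # Ingestion: extract text from each upload, split it into overlapping chunks,
    # embed the chunks, and build a fresh FAISS index over the embeddings.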
    if st.button("Ingest documents"):
        if not uploaded_files:
            st.warning("Please upload at least one file before ingesting.")
        else:
            all_chunks: List[str] = []
            metadata = []
            for uploaded in uploaded_files:
                tmp_path = save_uploaded_file(uploaded)
                name = uploaded.name
                text = ""
                if name.lower().endswith(".pdf"):
                    text = extract_text_from_pdf(tmp_path)
                elif name.lower().endswith(".docx"):
                    text = extract_text_from_docx(tmp_path)
                elif name.lower().endswith(".txt"):
                    text = extract_text_from_txt(tmp_path)
                else:
                    continue

                if not text or not text.strip():
                    continue

                chunks = chunk_text(text, chunk_size=chunk_size, overlap=chunk_overlap)
                for i, c in enumerate(chunks):
                    all_chunks.append(c)
                    metadata.append({"source": name, "chunk_id": i, "text": c})

            if not all_chunks:
                st.error("No chunks were generated. Aborting.")
            else:
                with st.spinner("Computing embeddings..."):
                    embeddings = embed_model.encode(all_chunks, show_progress_bar=True, convert_to_numpy=True)
                    embeddings = normalize_embeddings(embeddings)

                index = build_faiss_index(embeddings)
                st.session_state["index"] = index
                st.session_state["metadata"] = metadata
                if save_index_and_metadata(index, metadata):
                    st.success(f"Index built and saved. {len(all_chunks)} chunks indexed.")
                else:
                    st.success(f"Index built in memory only (saving to disk failed). {len(all_chunks)} chunks indexed.")
                st.info(f"Ingested {len(all_chunks)} chunks from {len(set(m['source'] for m in metadata))} file(s).")
                if all_chunks:
                    st.markdown("### Example chunk")
                    st.write(all_chunks[0][:1000])

    st.markdown("---")

    st.header("2) Ask questions (uses retrieved context + Groq generation)")
    query = st.text_area("Enter your question here", height=120)

    col1, col2 = st.columns([1, 3])

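    # Question answering: embed the query with the same model, retrieve the
    # top-k most similar chunks from FAISS, and ask Groq to answer using only
    # those passages as context.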
    with col1:
        if st.button("Ask"):
            if not query or not query.strip():
                st.warning("Please enter a question.")
            elif "index" not in st.session_state or "metadata" not in st.session_state:
                st.warning("No index available. Please ingest documents first.")
            else:
                index = st.session_state["index"]
                metadata = st.session_state["metadata"]
                q_emb = embed_model.encode([query], convert_to_numpy=True)
                q_emb = normalize_embeddings(q_emb)
                q_emb = np.ascontiguousarray(q_emb.astype('float32'))
                # Never ask FAISS for more neighbours than there are vectors in the index.
                k = min(top_k, int(index.ntotal))
                if k <= 0:
                    st.warning("Index is empty. Ingest documents first.")
                else:
                    D, I = index.search(q_emb, k)
                    hits = I[0].tolist() if I is not None else []

                    contexts = []
                    for idx in hits:
                        if idx < 0 or idx >= len(metadata):
                            continue
                        meta = metadata[idx]
                        contexts.append(meta)

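                    # Build a grounded prompt: the system message restricts the model to the
                    # retrieved passages, and the user message carries the tagged context
                    # followed by the question.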
                    client = get_groq_client_from_env()
                    if client is None:
                        st.error("Groq client could not be created. Set GROQ_API_KEY in your Hugging Face secrets or environment.")
                    else:
                        system_msg = {
                            "role": "system",
                            "content": "You are an assistant that answers user questions using ONLY the context passages provided. If the answer is not contained in the context, say you don't know. Provide short answers and include the source(s) for any factual claims."
                        }

                        context_blocks = []
                        for c in contexts:
                            block = f"[Source: {c['source']} | chunk_id: {c['chunk_id']}]\n{c['text']}"
                            context_blocks.append(block)

                        context_text = "\n\n---\n\n".join(context_blocks)

                        user_msg_text = (
                            f"Context passages:\n\n{context_text}\n\n"
                            f"Question: {query}\n\n"
                            "Instructions: Use only the context passages above to answer. If the context does not contain enough information, respond that you don't know. Keep the answer concise and cite the source tags in square brackets."
                        )

                        user_msg = {"role": "user", "content": user_msg_text}

                        with st.spinner("Generating answer from Groq model..."):
                            answer = groq_chat_answer(client, messages=[system_msg, user_msg])

                        if answer:
                            st.subheader("Answer")
                            st.write(answer)
                        else:
                            st.error("No answer was returned from the Groq API. Check the API key and model availability.")

                        st.subheader("Retrieved passages (for transparency)")
                        for c in contexts:
                            st.markdown(f"**Source:** {c['source']} — chunk {c['chunk_id']}")
                            st.write(c['text'][:500])
                            st.markdown("---")

    st.sidebar.markdown("---")
    st.sidebar.header("Index status")
    if "index" in st.session_state:
        try:
            st.sidebar.write(f"Vectors in index: {st.session_state['index'].ntotal}")
        except Exception:
            st.sidebar.write("Vectors in index: unknown")
    else:
        st.sidebar.write("No index built yet")

    st.sidebar.markdown("\nMade for development in Colab + Streamlit.\nUse responsibly and avoid publishing API keys.")


if __name__ == '__main__':
    main()