# app.py — Streamlit RAG chat over an uploaded PDF
# (Groq LLM for generation, HuggingFace embeddings + FAISS for retrieval).
import os
import tempfile
import streamlit as st
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
# NOTE: langchain_groq exports ChatGroq — there is no GroqLLM class, so the
# original `from langchain_groq import GroqLLM` failed at import time.
from langchain_groq import ChatGroq

# API keys come from the environment (use Streamlit secrets in production).
# HUGGINGFACE_API_KEY is read here but never passed to anything below.
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")

# Groq-hosted Llama 3; low temperature keeps answers grounded in the
# retrieved context rather than free-form generation.
llm = ChatGroq(
    api_key=GROQ_API_KEY,
    model="llama3-8b-8192",
    temperature=0.1,
)

# Default local sentence-transformers embedding model.
embedding = HuggingFaceEmbeddings()
# --- Page UI ---
# The original title emoji was mojibake ("๐Ÿ“„" = UTF-8 bytes of 📄 decoded
# through a single-byte codepage); restored to the intended character.
st.title("📄 RAG Chat with Groq + HuggingFace")

# Inputs: a PDF to index, a question about it, and an explicit submit trigger.
uploaded_file = st.file_uploader("Upload a PDF file", type=["pdf"])
user_query = st.text_input("Ask something about the document")
submit_button = st.button("Submit")
# Run the RAG pipeline only when a file is present and Submit was clicked.
if uploaded_file and submit_button:
    if not user_query.strip():
        # Guard: running the chain with an empty query wastes an API call.
        st.warning("Please enter a question about the document.")
    else:
        # PyPDFLoader needs a real file path, so persist the upload to disk.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp_file:
            tmp_file.write(uploaded_file.read())
            tmp_path = tmp_file.name

        try:
            # Load the PDF and split it into page-level documents.
            pages = PyPDFLoader(tmp_path).load_and_split()

            # Index the pages in an in-memory FAISS store for similarity search.
            vectorstore = FAISS.from_documents(pages, embedding)
            retriever = vectorstore.as_retriever()

            # Prompt that constrains the model to the retrieved context.
            prompt_template = PromptTemplate(
                input_variables=["context", "question"],
                template="""
Use the following context to answer the question. Be concise and accurate.
Context: {context}
Question: {question}
""",
            )

            qa_chain = RetrievalQA.from_chain_type(
                llm=llm,
                retriever=retriever,
                return_source_documents=True,
                chain_type_kwargs={"prompt": prompt_template},
            )

            # .invoke() is the current chain-call API; calling the chain
            # directly (qa_chain({...})) is deprecated in modern LangChain.
            result = qa_chain.invoke({"query": user_query})

            st.markdown("### 💬 Answer")
            st.write(result["result"])

            # Show which source chunks backed the answer.
            with st.expander("📄 Sources"):
                for doc in result["source_documents"]:
                    # .get: PDF metadata is not guaranteed to carry "source".
                    st.write(doc.metadata.get("source", "unknown"))
        finally:
            # delete=False above means we own the temp file's lifetime;
            # without this the file leaked on every submit.
            os.unlink(tmp_path)