Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -1,76 +1,76 @@
 import os
+import tempfile
 import streamlit as st
-
-from langchain_community.
+
+from langchain_community.document_loaders import PyPDFLoader
 from langchain_community.vectorstores import FAISS
-from
-from langchain_community.document_loaders import TextLoader, PyPDFLoader
+from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain.chains import RetrievalQA
-from langchain.
-from
+from langchain.prompts import PromptTemplate
+from langchain_groq import GroqLLM
 
-#
+# Set environment variables (You can also use os.environ or Streamlit secrets)
 GROQ_API_KEY = os.getenv("GROQ_API_KEY")
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-# Streamlit UI
-st.set_page_config(page_title="Groq RAG App", layout="centered")
-st.title("RAG App with Groq + LangChain + FAISS")
-st.write("Upload a PDF or TXT file, ask a question, and get smart answers.")
-
-uploaded_file = st.file_uploader("Upload your document", type=["pdf", "txt"])
-
-if uploaded_file:
-    with NamedTemporaryFile(delete=False) as tmp_file:
+HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")
+
+# Initialize Groq LLM
+llm = GroqLLM(
+    api_key=GROQ_API_KEY,
+    model="llama3-8b-8192",  # <- correct param
+    temperature=0.1
+)
+
+# HuggingFace Embeddings
+embedding = HuggingFaceEmbeddings()
+
+st.title("RAG Chat with Groq + HuggingFace")
+
+# Upload PDF
+uploaded_file = st.file_uploader("Upload a PDF file", type=["pdf"])
+
+user_query = st.text_input("Ask something about the document")
+submit_button = st.button("Submit")
+
+if uploaded_file and submit_button:
+    # Save PDF temporarily
+    with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp_file:
         tmp_file.write(uploaded_file.read())
         tmp_path = tmp_file.name
 
-    # Load
-
-
-
-
+    # Load and split
+    loader = PyPDFLoader(tmp_path)
+    pages = loader.load_and_split()
+
+    # Create FAISS vectorstore
+    vectorstore = FAISS.from_documents(pages, embedding)
+    retriever = vectorstore.as_retriever()
 
-
-
-    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
-    texts = splitter.split_documents(docs)
+    # Custom prompt (optional)
+    prompt_template = PromptTemplate(
+        input_variables=["context", "question"],
+        template="""
+Use the following context to answer the question. Be concise and accurate.
+
+Context: {context}
+
+Question: {question}
+"""
+    )
 
-
-
-
+    # Create QA chain
+    qa_chain = RetrievalQA.from_chain_type(
+        llm=llm,
+        retriever=retriever,
+        return_source_documents=True,
+        chain_type_kwargs={"prompt": prompt_template}
+    )
 
-    #
-
-
+    # Run QA
+    result = qa_chain({"query": user_query})
+    st.markdown("### Answer")
+    st.write(result["result"])
 
-    #
-
+    # Optional: Show sources
+    with st.expander("Sources"):
+        for doc in result["source_documents"]:
+            st.write(doc.metadata["source"])
 
-
-
-
-
-    st.success(result)
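One likely cause of the runtime error above is the new import line "from langchain_groq import GroqLLM": the langchain-groq package exposes its chat model as ChatGroq, and there is no GroqLLM class, so the import fails when the Space starts. Below is a minimal sketch of the drop-in replacement, keeping the same model name and temperature as in the diff; treat it as an assumption about the fix, not part of the commit.

import os
from langchain_groq import ChatGroq  # chat model class provided by langchain-groq

# Equivalent of the GroqLLM(...) block in the diff above.
# ChatGroq also picks up GROQ_API_KEY from the environment if api_key is omitted.
llm = ChatGroq(
    model="llama3-8b-8192",
    temperature=0.1,
    api_key=os.getenv("GROQ_API_KEY"),
)

# Quick smoke test (assumes GROQ_API_KEY is set in the Space secrets)
print(llm.invoke("Reply with one word: ready").content)

With that change the rest of the app can stay as written: RetrievalQA.from_chain_type(llm=llm, ...) accepts a chat model, and qa_chain.invoke({"query": user_query}) is the non-deprecated way to call the chain instead of qa_chain({"query": user_query}).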
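If the import is not the problem, missing dependencies are another common reason a Space with this stack fails at startup. Judging only from the imports in the new app.py (these are standard PyPI package names, not taken from the repository), a requirements.txt would typically need:

streamlit
langchain
langchain-community
langchain-groq
faiss-cpu
pypdf
sentence-transformers

faiss-cpu backs the FAISS vector store, pypdf is required by PyPDFLoader, and sentence-transformers is required by the default HuggingFaceEmbeddings model.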