Update app.py
app.py (CHANGED)
@@ -7,20 +7,27 @@ from langchain_community.vectorstores import FAISS
 from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain.chains import RetrievalQA
 from langchain.prompts import PromptTemplate
-from langchain_groq import GroqLLM
+from langchain.schema import Document
+# from langchain_groq import GroqLLM
+from langchain_groq import ChatGroq
 
-# --- Environment
+# --- Environment Variables ---
 GROQ_API_KEY = os.getenv("GROQ_API_KEY", "your-groq-api-key")
 HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY", "your-huggingface-api-key")
 
-# --- Groq LLM
-llm = GroqLLM(
+# --- Initialize Groq LLM ---
+# llm = GroqLLM(
+#     api_key=GROQ_API_KEY,
+#     model="llama3-8b-8192",
+#     temperature=0.1
+# )
+llm = ChatGroq(
     api_key=GROQ_API_KEY,
-    model="llama3-8b-8192",
+    model_name="llama3-8b-8192",  # Note: it's `model_name` not `model`
     temperature=0.1
 )
 
-# --- HuggingFace Embeddings
+# --- HuggingFace Embeddings ---
 embedding = HuggingFaceEmbeddings(
     model_name="sentence-transformers/all-MiniLM-L6-v2",
     cache_folder="./hf_cache",
@@ -28,36 +35,57 @@ embedding = HuggingFaceEmbeddings(
 )
 
 # --- Streamlit UI ---
-st.title("
+st.title("📄📥 Chat with PDF or Text using Groq + RAG")
 
+# Option to upload PDF
 uploaded_file = st.file_uploader("Upload a PDF file", type=["pdf"])
+
+# Option to paste raw text
+pasted_text = st.text_area("Or paste some text below:")
+
+# User's question
+user_query = st.text_input("Ask a question about the content")
+
+# Submit button
 submit_button = st.button("Submit")
 
-if uploaded_file and submit_button:
+if submit_button:
+    documents = []
+
+    # Handle uploaded PDF
+    if uploaded_file:
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp_file:
+            tmp_file.write(uploaded_file.read())
+            tmp_path = tmp_file.name
 
-    pages = loader.load_and_split()
+        loader = PyPDFLoader(tmp_path)
+        documents = loader.load_and_split()
 
+    # Handle pasted text if no PDF
+    elif pasted_text.strip():
+        documents = [Document(page_content=pasted_text)]
+
+    else:
+        st.warning("Please upload a PDF or paste some text.")
+        st.stop()
+
+    # Create vector store
+    vectorstore = FAISS.from_documents(documents, embedding)
     retriever = vectorstore.as_retriever()
 
+    # Optional custom prompt
     prompt_template = PromptTemplate(
         input_variables=["context", "question"],
         template="""
-    You are an
+    You are an AI assistant. Use the following context to answer the question.
+    Be concise, accurate, and helpful.
+
     Context: {context}
     Question: {question}
     Answer:"""
     )
 
+    # QA Chain
     qa_chain = RetrievalQA.from_chain_type(
         llm=llm,
         chain_type="stuff",
@@ -66,12 +94,15 @@ if uploaded_file and submit_button:
         chain_type_kwargs={"prompt": prompt_template}
    )
 
+    # Run QA
     result = qa_chain({"query": user_query})
+
+    # Show result
     st.markdown("### 💬 Answer")
     st.write(result["result"])
 
+    # Show sources (only if from PDF)
+    if uploaded_file:
+        with st.expander("📄 Sources"):
+            for i, doc in enumerate(result["source_documents"]):
+                st.write(f"**Page {i+1}** — {doc.metadata.get('source', 'Unknown')}")