import os

import streamlit as st
from PyPDF2 import PdfReader
from dotenv import load_dotenv
import google.generativeai as genai
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
from langchain_community.vectorstores import FAISS
from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI

# Load the Gemini API key (GOOGLE_API_KEY) from a local .env file
load_dotenv()
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

# Configure the page before any other Streamlit call; older Streamlit versions
# require set_page_config() to be the first Streamlit command in the script.
st.set_page_config(page_title="Chat with PDFs", layout="wide")

# Inject CSS for chat bubbles
st.markdown("""
    <style>
    .chat-bubble {
        padding: 12px 16px;
        margin: 10px 0;
        border-radius: 12px;
        max-width: 80%;
        font-size: 16px;
        line-height: 1.5;
    }
    .user {
        background-color: #DCF8C6;
        align-self: flex-end;
        margin-left: auto;
    }
    .bot {
        background-color: #F1F0F0;
        align-self: flex-start;
        margin-right: auto;
    }
    </style>
""", unsafe_allow_html=True)
def get_pdf_text(pdf_docs):
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            page_text = page.extract_text()
            if page_text:
                text += page_text
    return text
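
# Split the raw text into large overlapping chunks so neighbouring chunks
# share context when they are embedded and retrieved.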
def get_text_chunks(text):
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
    chunks = text_splitter.split_text(text)
    return chunks
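
# Embed the chunks with Gemini embeddings and index them in an in-memory FAISS store.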
def get_vector_store(text_chunks):
    if not text_chunks:
        raise ValueError("No text chunks to embed. Check if your PDF contains extractable text.")
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
    return vector_store
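
# Build a "stuff"-type QA chain: the retrieved chunks are placed directly into
# the prompt and answered by the Gemini chat model.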
def get_conversational_chain():
    prompt_template = """
    Answer the question as detailed as possible from the provided context.
    If the answer is not in the provided context, just say "answer is not available in the context".
    Don't make up answers.
    Context:\n{context}\n
    Question:\n{question}\n
    Answer:
    """
    model = ChatGoogleGenerativeAI(model="models/gemini-2.0-flash", temperature=0.3)
    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
    return chain
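
# Render one question/answer exchange using the chat-bubble CSS injected above.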
def display_chat(user_msg, bot_msg):
    st.markdown(f"<div class='chat-bubble user'>{user_msg}</div>", unsafe_allow_html=True)
    st.markdown(f"<div class='chat-bubble bot'>{bot_msg}</div>", unsafe_allow_html=True)
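
# Streamlit UI: the left column handles PDF upload and indexing, the right
# column answers questions against the indexed chunks.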
def main():
    st.title("Chat with Your PDFs using Gemini")
    col1, col2 = st.columns([1, 2], gap="large")

    # LEFT: Upload PDFs
    with col1:
        st.header("Upload & Process")
        pdf_docs = st.file_uploader("Upload PDF files", type=["pdf"], accept_multiple_files=True)
        if st.button("Submit & Process"):
            if not pdf_docs:
                st.error("Please upload at least one PDF file.")
                return
            with st.spinner("Extracting text and creating embeddings..."):
                raw_text = get_pdf_text(pdf_docs)
                if not raw_text.strip():
                    st.error("No extractable text found in the uploaded PDFs. They might be scanned images.")
                    return
                text_chunks = get_text_chunks(raw_text)
                if not text_chunks:
                    st.error("Unable to create text chunks. Please try with a different PDF.")
                    return
                st.info(f"Extracted and split into {len(text_chunks)} chunks.")
                try:
                    vector_store = get_vector_store(text_chunks)
                    st.session_state.vector_store = vector_store
                    st.success("PDFs processed successfully! You can now ask questions.")
                except Exception as e:
                    st.error(f"Error creating vector store: {str(e)}")

    # RIGHT: Ask Questions
    with col2:
        st.header("Ask Questions")
        user_question = st.text_input("Type your question here...")
        if user_question:
            if "vector_store" not in st.session_state:
                st.error("Please upload and process PDFs first.")
            else:
                vector_store = st.session_state.vector_store
                docs = vector_store.similarity_search(user_question)
                chain = get_conversational_chain()
                response = chain(
                    {"input_documents": docs, "question": user_question},
                    return_only_outputs=True
                )
                display_chat(user_question, response["output_text"])

if __name__ == "__main__":
    main()
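
# A minimal way to run this locally, assuming the script is saved as app.py and
# a .env file in the same directory provides GOOGLE_API_KEY:
#   pip install streamlit PyPDF2 python-dotenv langchain langchain-community langchain-google-genai faiss-cpu google-generativeai
#   streamlit run app.py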