import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
from langchain_google_genai import GoogleGenerativeAIEmbeddings
import google.generativeai as genai
from langchain_community.vectorstores import FAISS
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
from dotenv import load_dotenv

# Load API key
load_dotenv()
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

# Inject CSS for chat bubbles
st.markdown(""" """, unsafe_allow_html=True)


def get_pdf_text(pdf_docs):
    # Concatenate the extracted text from every page of every uploaded PDF
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            page_text = page.extract_text()
            if page_text:
                text += page_text
    return text


def get_text_chunks(text):
    # Split the raw text into large, overlapping chunks for embedding
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
    chunks = text_splitter.split_text(text)
    return chunks


def get_vector_store(text_chunks):
    # Embed the chunks with Gemini embeddings and index them in FAISS
    if not text_chunks:
        raise ValueError("No text chunks to embed. Check if your PDF contains extractable text.")
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
    return vector_store


def get_conversational_chain():
    # Build a "stuff" QA chain that answers strictly from the retrieved context
    prompt_template = """
    Answer the question as detailed as possible from the provided context.
    If the answer is not in the provided context, just say "answer is not available in the context".
    Don't make up answers.

    Context:\n{context}\n
    Question:\n{question}\n

    Answer:
    """
    model = ChatGoogleGenerativeAI(model="models/gemini-2.0-flash", temperature=0.3)
    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
    return chain


def display_chat(user_msg, bot_msg):
    st.markdown(f"