import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import html
import os
from langchain_google_genai import GoogleGenerativeAIEmbeddings
import google.generativeai as genai
from langchain_community.vectorstores import FAISS
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
from dotenv import load_dotenv

# Load the Gemini API key from the environment (or a local .env file)
# and fail fast with a clear message if it is missing.
load_dotenv()
api_key = os.getenv("GOOGLE_API_KEY")
if not api_key:
    raise RuntimeError("GOOGLE_API_KEY is not set. Add it to your environment or a .env file.")
genai.configure(api_key=api_key)
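# A .env file is expected alongside this script, e.g. (placeholder value):
#   GOOGLE_API_KEY=your-api-key-here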

def inject_chat_css():
    # CSS for the chat bubbles. Wrapped in a function and called from main()
    # after st.set_page_config(), because set_page_config must be the first
    # Streamlit command executed; running st.markdown at module level before
    # it would raise a StreamlitAPIException.
    st.markdown("""
        <style>
        .chat-bubble {
            padding: 12px 16px;
            margin: 10px 0;
            border-radius: 12px;
            max-width: 80%;
            font-size: 16px;
            line-height: 1.5;
        }
        .user {
            background-color: #DCF8C6;
            align-self: flex-end;
            margin-left: auto;
        }
        .bot {
            background-color: #F1F0F0;
            align-self: flex-start;
            margin-right: auto;
        }
        </style>
    """, unsafe_allow_html=True)

def get_pdf_text(pdf_docs):
    """Concatenate the extractable text from every page of every uploaded PDF."""
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            page_text = page.extract_text()
            if page_text:  # skip pages with no extractable text
                text += page_text
    return text
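# If this returns an empty string, the PDFs are likely image-only scans with no
# text layer; an OCR step (e.g. pdf2image + pytesseract, neither used here)
# would be needed before extraction.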

def get_text_chunks(text):
    # Large chunks (10,000 chars) keep related passages together, while the
    # 1,000-char overlap preserves context across chunk boundaries.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
    return text_splitter.split_text(text)

def get_vector_store(text_chunks):
    if not text_chunks:
        raise ValueError("No text chunks to embed. Check if your PDF contains extractable text.")
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
    return vector_store
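# Optional (not wired in here): the index can be persisted so PDFs don't have
# to be reprocessed on every run. langchain_community's FAISS wrapper provides
# save_local/load_local; the folder name below is just an example, and
# load_local requires opting in to pickle deserialization:
#   vector_store.save_local("faiss_index")
#   vector_store = FAISS.load_local("faiss_index", embeddings,
#                                   allow_dangerous_deserialization=True)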

def get_conversational_chain():
    prompt_template = """
    Answer the question as detailed as possible from the provided context. 
    If the answer is not in the provided context, just say "answer is not available in the context".
    Don't make up answers.
    Context:\n{context}\n
    Question:\n{question}\n
    Answer:
    """
    # Low temperature keeps answers close to the source text; the "stuff"
    # chain type concatenates all retrieved chunks into a single prompt.
    model = ChatGoogleGenerativeAI(model="models/gemini-2.0-flash", temperature=0.3)
    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
    return chain
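# Note: load_qa_chain is deprecated in recent LangChain releases; the usual
# replacement is create_stuff_documents_chain from
# langchain.chains.combine_documents, built from a similar model + prompt pair.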

def display_chat(user_msg, bot_msg):
    # Escape the raw strings so question/answer text cannot inject HTML
    # through the unsafe_allow_html markdown below.
    st.markdown(f"<div class='chat-bubble user'>{html.escape(user_msg)}</div>", unsafe_allow_html=True)
    st.markdown(f"<div class='chat-bubble bot'>{html.escape(bot_msg)}</div>", unsafe_allow_html=True)

def main():
    st.set_page_config(page_title="Chat with PDFs", layout="wide")
    inject_chat_css()  # safe to inject CSS now that set_page_config has run
    st.title("πŸ“š Chat with Your PDFs using Gemini")

    col1, col2 = st.columns([1, 2], gap="large")

    # LEFT: Upload PDFs
    with col1:
        st.header("πŸ“ Upload & Process")
        pdf_docs = st.file_uploader("Upload PDF files", type=["pdf"], accept_multiple_files=True)
        if st.button("πŸ”„ Submit & Process"):
            if not pdf_docs:
                st.error("❗ Please upload at least one PDF file.")
                return

            with st.spinner("πŸ” Extracting text and creating embeddings..."):
                raw_text = get_pdf_text(pdf_docs)

                if not raw_text.strip():
                    st.error("❗ No extractable text found in the uploaded PDFs. They might be scanned images.")
                    return

                text_chunks = get_text_chunks(raw_text)

                if not text_chunks:
                    st.error("❗ Unable to create text chunks. Please try with a different PDF.")
                    return

                st.info(f"βœ… Extracted and split into {len(text_chunks)} chunks.")

                try:
                    vector_store = get_vector_store(text_chunks)
                    st.session_state.vector_store = vector_store
                    st.success("PDFs processed successfully! You can now ask questions.")
                except Exception as e:
                    st.error(f"❗ Error creating vector store: {str(e)}")

    # RIGHT: Ask Questions
    with col2:
        st.header("πŸ’¬ Ask Questions")
        user_question = st.text_input("Type your question here...")

        if user_question:
            if "vector_store" not in st.session_state:
                st.error("❗ Please upload and process PDFs first.")
            else:
                vector_store = st.session_state.vector_store
                # Retrieve the most similar chunks (FAISS default k=4) and
                # answer from them. chain.invoke replaces the deprecated
                # direct chain(...) call style.
                docs = vector_store.similarity_search(user_question)
                chain = get_conversational_chain()
                response = chain.invoke(
                    {"input_documents": docs, "question": user_question}
                )
                display_chat(user_question, response["output_text"])

if __name__ == "__main__":
    main()
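# To run locally (the filename app.py is an assumption; use whatever name this
# file is saved under):
#   pip install streamlit PyPDF2 python-dotenv google-generativeai \
#       langchain langchain-community langchain-google-genai faiss-cpu
#   streamlit run app.py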