File size: 10,253 Bytes
1fa9052
172fd5d
 
 
 
 
 
 
 
 
 
 
 
1fa9052
172fd5d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
import streamlit as st
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain_groq import ChatGroq
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.llms import HuggingFaceHub
from langchain.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from htmlTemplates import css, bot_template, user_template 
import os


def get_pdf_text(pdf_docs):
    """Concatenate the extractable text of every page of every uploaded PDF.

    Args:
        pdf_docs: iterable of file-like objects (e.g. Streamlit UploadedFile)
            accepted by PyPDF2's ``PdfReader``.

    Returns:
        str: all page text joined together; may be empty if no page has an
        extractable text layer.
    """
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() returns None for pages with no text layer
            # (e.g. scanned images) — guard against TypeError on `+=`.
            text += page.extract_text() or ""
    return text

def get_text_chunks(text):
    """Split *text* into overlapping chunks suitable for embedding.

    Chunks are up to 1000 characters, overlapping by 200, split on newlines.

    Returns:
        list[str]: the chunk strings.
    """
    splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
    )
    return splitter.split_text(text)

def get_vector_store(text_chunks):
    """Embed *text_chunks* with a local HuggingFace model and index them in FAISS.

    Uses the CPU-only BGE-small embedding model with normalized embeddings,
    caching model files under /tmp/huggingface_cache.

    Returns:
        The FAISS vector store, or None if anything fails (the error is
        surfaced to the user via ``st.error``).
    """
    try:
        embedder = HuggingFaceEmbeddings(
            model_name="BAAI/bge-small-en",
            model_kwargs={'device': 'cpu'},
            encode_kwargs={"normalize_embeddings": True},
            cache_folder="/tmp/huggingface_cache",
        )
        return FAISS.from_texts(texts=text_chunks, embedding=embedder)
    except Exception as e:
        st.error(f"Error creating vector store: {str(e)}")
        return None

def get_conversation_chain(vectorstore, api_key):
    """Build a retrieval-augmented QA chain over *vectorstore* using Groq.

    Args:
        vectorstore: a FAISS store providing ``as_retriever``.
        api_key: the user's Groq API key; an empty/None value aborts early.

    Returns:
        A runnable LCEL chain mapping a question string to an answer string,
        or None when the key is missing or model initialization fails
        (errors are shown to the user via Streamlit).
    """
    if not api_key:
        st.error("Please provide a valid Groq API key.")
        return None

    try:
        # Set the API key in environment for this session
        os.environ["GROQ_API_KEY"] = api_key

        model = ChatGroq(
            model="llama3-8b-8192",
            temperature=0,
            api_key=api_key,
        )

        # Prompt: a grounding system message plus a context/question template.
        prompt = ChatPromptTemplate.from_messages([
            ("system", """You are a helpful assistant answering questions based on the provided documents.
            Answer the question using only the context provided.
            If you don't know the answer, just say that you don't know, don't try to make up an answer.
            Keep your answers focused and relevant to the question."""),
            ("human", """Context: {context}

Question: {question}

Answer: """)
        ])

        # Retrieve the 4 most similar chunks for each question.
        retriever = vectorstore.as_retriever(search_kwargs={"k": 4})

        # LCEL pipeline: fetch context, fill the prompt, call the LLM,
        # and unwrap the message into a plain string.
        return (
            {"context": retriever, "question": RunnablePassthrough()}
            | prompt
            | model
            | StrOutputParser()
        )
    except Exception as e:
        st.error(f"Failed to initialize Groq model: {str(e)}")
        st.info("Please check if your API key is valid. Get your API key from: https://console.groq.com/keys")
        return None

def handle_user_input(user_question):
    """Answer *user_question* with the stored chain and re-render the chat.

    Requires ``st.session_state.conversation`` to be an initialized chain;
    otherwise warns the user and returns. Appends the new exchange to
    ``st.session_state.chat_history`` and redraws the whole history.
    """
    if st.session_state.conversation is None:
        st.warning("Please upload and process documents first.")
        return

    try:
        # Run the retrieval chain; the result is the answer string.
        answer = st.session_state.conversation.invoke(user_question)

        # Defensive init in case history was never created.
        if 'chat_history' not in st.session_state:
            st.session_state.chat_history = []

        st.session_state.chat_history.extend(
            [("user", user_question), ("bot", answer)]
        )

        # Redraw the full conversation, oldest first.
        for sender, message in st.session_state.chat_history:
            template = user_template if sender == "user" else bot_template
            st.write(template.replace("{{MSG}}", message), unsafe_allow_html=True)

    except Exception as e:
        st.error(f"An error occurred while processing your question: {str(e)}")
        st.info("This might be due to an invalid API key or network issues.")

def main():
    """Streamlit entry point: configure the page, collect the Groq API key,
    process uploaded PDFs into a retrieval chain, and serve the chat UI.
    """
    load_dotenv()

    # Set environment variables for HuggingFace cache
    os.environ['HUGGINGFACE_HUB_CACHE'] = '/tmp/huggingface_cache'
    os.environ['TRANSFORMERS_CACHE'] = '/tmp/huggingface_cache'

    # Create cache directory
    os.makedirs('/tmp/huggingface_cache', exist_ok=True)

    # NOTE(review): a fallback that re-defined user_template/bot_template when
    # absent from globals() was removed — both names are unconditionally
    # imported from htmlTemplates at the top of this file, so the fallback
    # branch could never execute.

    st.set_page_config(page_title='Chat with PDFs', page_icon=":books:")
    st.write(css, unsafe_allow_html=True)

    # Initialize session state (chain, chat log, remembered API key).
    if "conversation" not in st.session_state:
        st.session_state.conversation = None

    if "chat_history" not in st.session_state:
        st.session_state.chat_history = []

    if "groq_api_key" not in st.session_state:
        st.session_state.groq_api_key = ""

    st.header('PDF ChatBot πŸ“š')

    # API Key Input Section
    st.sidebar.header("πŸ”‘ API Configuration")

    # API Key input (masked); pre-filled from session state so it survives reruns.
    groq_api_key = st.sidebar.text_input(
        "Enter your Groq API Key:",
        type="password",
        value=st.session_state.groq_api_key,
        help="Get your free API key from https://console.groq.com/keys"
    )

    # Update session state
    if groq_api_key:
        st.session_state.groq_api_key = groq_api_key
        st.sidebar.success("βœ… API Key provided!")
    else:
        st.sidebar.warning("⚠️ Please enter your Groq API key to continue.")
        st.sidebar.info("Get your free API key from: https://console.groq.com/keys")

    st.sidebar.markdown("---")

    # Sidebar for PDF upload
    st.sidebar.subheader("πŸ“„ Upload Documents")
    pdf_docs = st.sidebar.file_uploader(
        "Upload your PDFs here and click 'Process'",
        accept_multiple_files=True,
        type=['pdf']
    )

    # Process button: extract text -> chunk -> embed -> build chain.
    if st.sidebar.button('πŸš€ Process Documents'):
        if not groq_api_key:
            st.sidebar.error("❌ Please enter your Groq API key first!")
            st.error("Please provide your Groq API key in the sidebar to continue.")
            return

        if not pdf_docs:
            st.sidebar.warning("πŸ“‹ Please upload at least one PDF document.")
            return

        with st.spinner("Processing documents... This may take a few minutes for the first run."):
            try:
                # Get PDF text
                raw_text = get_pdf_text(pdf_docs)

                if not raw_text.strip():
                    st.error("❌ No text could be extracted from the PDFs. Please check if the PDFs contain readable text.")
                    return

                st.info(f"βœ… Extracted {len(raw_text)} characters from {len(pdf_docs)} PDF(s)")

                # Get text chunks
                text_chunks = get_text_chunks(raw_text)
                st.info(f"βœ… Created {len(text_chunks)} text chunks")

                # Create vector store (downloads the embedding model on first run)
                with st.spinner("Creating embeddings..."):
                    vectorstore = get_vector_store(text_chunks)

                if vectorstore is None:
                    st.error("❌ Failed to create vector store. Please try again.")
                    return

                st.info("βœ… Vector store created successfully")

                # Create conversation chain
                with st.spinner("Initializing conversation chain..."):
                    conversation = get_conversation_chain(vectorstore, groq_api_key)

                if conversation is None:
                    st.error("❌ Failed to create conversation chain. Please check your API key.")
                    return

                st.session_state.conversation = conversation
                st.success("πŸŽ‰ Documents processed successfully! You can now ask questions.")

            except Exception as e:
                st.error(f"❌ An error occurred: {str(e)}")
                st.info("Please check your API key and try again.")

    # Main chat interface
    st.subheader("πŸ’¬ Ask Questions About Your Documents")

    if not groq_api_key:
        st.info("πŸ‘† Please enter your Groq API key in the sidebar to get started.")
        st.info("πŸ”— Get your free API key from: https://console.groq.com/keys")
    elif st.session_state.conversation is None:
        st.info("πŸ“€ Upload and process your PDF documents using the sidebar to start chatting.")
    else:
        user_question = st.text_input(
            "Your question:",
            placeholder="Ask anything about your uploaded documents..."
        )

        if user_question:
            handle_user_input(user_question)

    # Display instructions until the app is fully set up.
    if not groq_api_key or st.session_state.conversation is None:
        st.markdown("---")
        st.markdown("### πŸ“‹ How to Use:")
        st.markdown("""
        1. **Get API Key**: Visit [Groq Console](https://console.groq.com/keys) to get your free API key
        2. **Enter API Key**: Paste your API key in the sidebar
        3. **Upload PDFs**: Upload one or more PDF documents
        4. **Process**: Click 'Process Documents' to analyze your PDFs
        5. **Chat**: Ask questions about your documents!
        """)

# Entry point when the script is executed directly (e.g. `streamlit run`).
if __name__ == "__main__":
    main()