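"""Streamlit app for chatting with multiple PDF documents.

Pipeline: extract text with PyPDF2, split it into overlapping chunks with
LangChain's CharacterTextSplitter, embed the chunks into a FAISS index using
a Hugging Face sentence-transformer, and answer questions through a
ConversationalRetrievalChain backed by a Groq-hosted LLM.
"""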
import os
import logging
from dotenv import load_dotenv
import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain_groq import ChatGroq
# Load environment variables
load_dotenv()
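# ChatGroq authenticates via the GROQ_API_KEY environment variable, which
# load_dotenv() is expected to pick up from a local .env file.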
# Set up logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
# Function to extract text from PDF files
def get_pdf_text(pdf_docs):
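    """Concatenate the extracted text of every page across all uploaded PDFs."""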
text = ""
for pdf in pdf_docs:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
text += page.extract_text() or ""
return text
# Function to split the extracted text into chunks
def get_text_chunks(text):
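    """Split the text into ~1000-character chunks with 200 characters of
    overlap so that context is preserved across chunk boundaries."""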
    text_splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len
    )
    return text_splitter.split_text(text)
# Function to create a FAISS vectorstore using Hugging Face embeddings
def get_vectorstore(text_chunks):
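    """Embed the chunks with all-MiniLM-L6-v2 and index them in a FAISS
    vectorstore (the model is fetched from the Hugging Face Hub on first run)."""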
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
    return vectorstore
# Function to set up the conversational retrieval chain
def get_conversation_chain(vectorstore):
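    """Wire the Groq LLM, the FAISS retriever, and a conversation buffer
    memory into a ConversationalRetrievalChain."""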
    try:
        llm = ChatGroq(model="mixtral-8x7b-32768", temperature=0.5)
        memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
        conversation_chain = ConversationalRetrievalChain.from_llm(
            llm=llm,
            retriever=vectorstore.as_retriever(),
            memory=memory
        )
        logging.info("Conversation chain created successfully.")
        return conversation_chain
    except Exception as e:
        logging.error(f"Error creating conversation chain: {e}")
        st.error("An error occurred while setting up the conversation chain.")
        return None
# Handle user input
def handle_userinput(user_question):
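    """Run the question through the conversation chain and render the
    accumulated chat history."""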
    if st.session_state.conversation is not None:
        response = st.session_state.conversation({'question': user_question})
        st.session_state.chat_history = response.get('chat_history', [])
        # The buffer memory alternates user and bot turns, so even indices
        # are user messages and odd indices are bot replies.
        for i, message in enumerate(st.session_state.chat_history):
            if i % 2 == 0:
                st.write(f"*User:* {message.content}")
            else:
                st.write(f"*Bot:* {message.content}")
    else:
        st.warning("Please process the documents first.")
# Main function to run the Streamlit app
def main():
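    """Build the Streamlit UI: a question box plus a sidebar for uploading
    and processing PDFs."""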
    st.set_page_config(page_title="Chat with multiple PDFs", page_icon=":books:")

    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = []

    st.header("Chat with multiple PDFs :books:")
    user_question = st.text_input("Ask a question about your documents:")
    if user_question:
        handle_userinput(user_question)

    with st.sidebar:
        st.subheader("Your documents")
        pdf_docs = st.file_uploader("Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
        if st.button("Process"):
            if not pdf_docs:
                st.warning("Please upload at least one PDF before processing.")
            else:
                with st.spinner("Processing..."):
                    raw_text = get_pdf_text(pdf_docs)
                    text_chunks = get_text_chunks(raw_text)
                    vectorstore = get_vectorstore(text_chunks)
                    if vectorstore:
                        st.session_state.conversation = get_conversation_chain(vectorstore)
if __name__ == '__main__':
    main()