Update app.py
app.py
CHANGED
@@ -1,11 +1,11 @@
 import gradio as gr
-from langchain.vectorstores import Chroma
-from langchain.embeddings import HuggingFaceEmbeddings
-from langchain.llms import HuggingFaceHub
+from langchain_community.vectorstores import Chroma
+from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_community.llms import HuggingFaceHub
 from langchain.chains import ConversationalRetrievalChain
-from langchain.document_loaders import SimpleDocumentLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.memory import ConversationBufferMemory
+from langchain_community.document_loaders import PyPDFLoader
 
 # Initialize the Hugging Face embedding model
 embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
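The import changes track LangChain's split of third-party integrations into the langchain_community package. The unchanged block between this hunk and the next (lines 12-20), where vectorstore, llm, and memory are created, falls outside the diff context. A minimal sketch of what that setup plausibly looks like, given the imports above and the memory line quoted in the next hunk header; the persist_directory and repo_id values are assumptions, not taken from the diff:

# Hypothetical reconstruction of the elided, unchanged setup (lines 12-20).
# Identifier names come from the visible hunks; concrete values are assumed.
vectorstore = Chroma(persist_directory="chroma_db", embedding_function=embedding_model)
llm = HuggingFaceHub(repo_id="google/flan-t5-large", model_kwargs={"temperature": 0.5, "max_length": 512})
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)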
@@ -21,19 +21,21 @@ memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
 qa_chain = ConversationalRetrievalChain.from_llm(llm, retriever=vectorstore.as_retriever(), memory=memory)
 
 def upload_docs(docs):
-    # Load and process the uploaded documents
-
-
-
+    # Load and process the uploaded PDF documents
+    loaded_docs = []
+    for doc in docs:
+        loader = PyPDFLoader(doc.name)
+        loaded_docs.extend(loader.load())
+
     # Split documents into manageable chunks
     text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
-    texts = text_splitter.split_documents(
-
+    texts = text_splitter.split_documents(loaded_docs)
+
     # Add documents to the vector store and persist them
     vectorstore.add_documents(texts)
     vectorstore.persist()
-
-    return "
+
+    return "PDF documents uploaded and processed successfully!"
 
 def chat(query):
     # Process the query with the conversational chain and return the result
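The body of chat() sits outside the changed hunks, so only its signature and comment are visible here. With this ConversationalRetrievalChain setup, a body consistent with the stub would be the following sketch (the actual lines in app.py are not shown in the diff):

def chat(query):
    # Process the query with the conversational chain and return the result
    result = qa_chain({"question": query})
    return result["answer"]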
@@ -44,7 +46,7 @@ def chat(query):
 with gr.Blocks() as demo:
     with gr.Row():
         with gr.Column():
-            doc_upload = gr.File(label="Upload your documents", file_types=[".
+            doc_upload = gr.File(label="Upload your PDF documents", file_types=[".pdf"], multiple=True)
             upload_button = gr.Button("Upload")
             upload_button.click(upload_docs, inputs=doc_upload, outputs=gr.Textbox())
 
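One caveat with the new gr.File line: Gradio's File component selects multi-file upload through the file_count parameter rather than a multiple keyword, so multiple=True would raise a TypeError when the interface is built. The intended line is presumably:

doc_upload = gr.File(label="Upload your PDF documents", file_types=[".pdf"], file_count="multiple")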
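The diff also stops inside the Blocks context, so the serving code is not shown; a Gradio app structured like this conventionally ends, after the with gr.Blocks() block, with:

demo.launch()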