SRINI123 committed (verified)
Commit 20a6b3e · 1 Parent(s): 73aaaf7

Create app.py

Files changed (1):
  1. app.py +60 -0
app.py ADDED
@@ -0,0 +1,60 @@
+ import gradio as gr
+ from langchain.vectorstores import Chroma
+ from langchain.embeddings import HuggingFaceEmbeddings
+ from langchain.llms import HuggingFaceHub
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.document_loaders import UnstructuredFileLoader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.memory import ConversationBufferMemory
+
+ # Initialize the Hugging Face embedding model
+ embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
+
+ # Initialize the LLaMA 2 model from the Hugging Face Hub
+ llm = HuggingFaceHub(repo_id="meta-llama/Llama-2-7b-hf", model_kwargs={"temperature": 0.7, "max_length": 512})
+
+ # Initialize ChromaDB for storing and retrieving document embeddings
+ vectorstore = Chroma(embedding_function=embedding_model, persist_directory="chroma_db")
+
+ # Create a conversational chain with retrieval capabilities
+ memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
+ qa_chain = ConversationalRetrievalChain.from_llm(llm, retriever=vectorstore.as_retriever(), memory=memory)
+
+ def upload_docs(docs):
+     # Load each uploaded file (Gradio passes file objects or file paths)
+     documents = []
+     for doc in docs:
+         path = doc if isinstance(doc, str) else doc.name
+         documents.extend(UnstructuredFileLoader(path).load())
+
+     # Split documents into manageable chunks
+     text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
+     texts = text_splitter.split_documents(documents)
+
+     # Add the chunks to the vector store and persist them
+     vectorstore.add_documents(texts)
+     vectorstore.persist()
+
+     return "Documents uploaded and processed successfully!"
+
+ def chat(query):
+     # ConversationalRetrievalChain expects the key "question" and returns "answer"
+     response = qa_chain({"question": query})
+     return response["answer"]
+
+ # Gradio interface
+ with gr.Blocks() as demo:
+     with gr.Row():
+         with gr.Column():
+             doc_upload = gr.File(label="Upload your documents", file_types=[".txt", ".pdf", ".docx"], file_count="multiple")
+             upload_status = gr.Textbox(label="Upload status")
+             upload_button = gr.Button("Upload")
+             upload_button.click(upload_docs, inputs=doc_upload, outputs=upload_status)
+
+         with gr.Column():
+             chat_input = gr.Textbox(label="Ask a question:")
+             chat_output = gr.Textbox(label="Answer:")
+             chat_button = gr.Button("Send")
+             chat_button.click(chat, inputs=chat_input, outputs=chat_output)
+
+ demo.launch()
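
A note on running this commit: the HuggingFaceHub LLM reads a HUGGINGFACEHUB_API_TOKEN from the environment, and meta-llama/Llama-2-7b-hf is a gated repository, so the token's account needs approved access; the Space also needs the gradio, langchain, chromadb, sentence-transformers and unstructured packages installed. The sketch below is a minimal, hypothetical sanity check of the indexing-and-retrieval path only (embeddings plus Chroma), which works without the LLM or an API token; it is not part of the commit, and the sample document text is illustrative.

# Hypothetical standalone check of the indexing path used in app.py
# (Hugging Face embeddings + Chroma only; no Hugging Face API token needed).
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.schema import Document

embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vectorstore = Chroma(embedding_function=embedding_model, persist_directory="chroma_db")

# Illustrative document; real content would come from the Gradio upload in app.py.
docs = [Document(page_content="LLaMA 2 is a family of open-weight language models released by Meta.")]
chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200).split_documents(docs)
vectorstore.add_documents(chunks)
vectorstore.persist()

# Retrieve the most similar chunk for a query, mirroring what the chain's retriever does.
print(vectorstore.similarity_search("What is LLaMA 2?", k=1)[0].page_content)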