SRINI123 committed on
Commit
2955434
·
verified ·
1 Parent(s): f9c64c3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -9
app.py CHANGED
@@ -5,14 +5,16 @@ from langchain_huggingface import HuggingFaceEmbeddings, HuggingFaceEndpoint
5
  from langchain_community.llms import HuggingFaceHub
6
  from langchain.chains import ConversationalRetrievalChain
7
  from langchain.text_splitter import RecursiveCharacterTextSplitter
8
- from langchain.memory import ConversationBufferMemory
9
- from langchain_community.document_loaders import PyPDFLoader
 
 
10
  import os
 
11
  load_dotenv()
12
 
13
  # Get the Hugging Face API token from the .env file
14
  hf_api_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
15
-
16
  if hf_api_token is None:
17
  raise ValueError("HUGGINGFACEHUB_API_TOKEN not found in .env file")
18
 
@@ -22,7 +24,8 @@ embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-Mi
22
  # Initialize the LLaMA 2 model from Hugging Face Hub using the token from .env
23
  llm = HuggingFaceEndpoint(
24
  repo_id="meta-llama/Llama-2-7b-hf",
25
- model_kwargs={"temperature": 0.7, "max_length": 512},
 
26
  huggingfacehub_api_token=hf_api_token
27
  )
28
 
@@ -31,7 +34,9 @@ vectorstore = Chroma(embedding_function=embedding_model, persist_directory="chro
31
 
32
  # Create a conversational chain with retrieval capabilities
33
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
34
- qa_chain = ConversationalRetrievalChain.from_llm(llm, retriever=vectorstore.as_retriever(), memory=memory)
 
 
35
 
36
  def upload_docs(docs):
37
  # Load and process the uploaded PDF documents
@@ -42,12 +47,12 @@ def upload_docs(docs):
42
 
43
  # Split documents into manageable chunks
44
  text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
45
- texts = text_splitter.split_documents(loaded_docs)
 
46
 
47
  # Add documents to the vector store and persist them
48
  vectorstore.add_documents(texts)
49
  vectorstore.persist()
50
-
51
  return "PDF documents uploaded and processed successfully!"
52
 
53
  def chat(query):
@@ -62,11 +67,10 @@ with gr.Blocks() as demo:
62
  doc_upload = gr.File(label="Upload your PDF documents", file_types=[".pdf"], multiple=True)
63
  upload_button = gr.Button("Upload")
64
  upload_button.click(upload_docs, inputs=doc_upload, outputs=gr.Textbox())
65
-
66
  with gr.Column():
67
  chat_input = gr.Textbox(label="Ask a question:")
68
  chat_output = gr.Textbox(label="Answer:")
69
  chat_button = gr.Button("Send")
70
  chat_button.click(chat, inputs=chat_input, outputs=chat_output)
71
 
72
- demo.launch()
 
5
  from langchain_community.llms import HuggingFaceHub
6
  from langchain.chains import ConversationalRetrievalChain
7
  from langchain.text_splitter import RecursiveCharacterTextSplitter
8
+ from langchain.memory import ConversationBufferMemory
9
+
10
+ from langchain_community.document_loaders import PyPDFLoader
11
+
12
  import os
13
+
14
  load_dotenv()
15
 
16
  # Get the Hugging Face API token from the .env file
17
  hf_api_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
 
18
  if hf_api_token is None:
19
  raise ValueError("HUGGINGFACEHUB_API_TOKEN not found in .env file")
20
 
 
24
  # Initialize the LLaMA 2 model from Hugging Face Hub using the token from .env
25
  llm = HuggingFaceEndpoint(
26
  repo_id="meta-llama/Llama-2-7b-hf",
27
+ temperature=0.7, # Specify temperature explicitly
28
+ max_length=512,
29
  huggingfacehub_api_token=hf_api_token
30
  )
31
 
 
34
 
35
  # Create a conversational chain with retrieval capabilities
36
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
37
+ qa_chain = ConversationalRetrievalChain.from_llm(llm,  
38
+ retriever=vectorstore.as_retriever(), memory=memory)  
39
+
40
 
41
  def upload_docs(docs):
42
  # Load and process the uploaded PDF documents
 
47
 
48
  # Split documents into manageable chunks
49
  text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
50
+ texts = text_splitter.split_documents(loaded_docs)  
51
+
52
 
53
  # Add documents to the vector store and persist them
54
  vectorstore.add_documents(texts)
55
  vectorstore.persist()
 
56
  return "PDF documents uploaded and processed successfully!"
57
 
58
  def chat(query):
 
67
  doc_upload = gr.File(label="Upload your PDF documents", file_types=[".pdf"], multiple=True)
68
  upload_button = gr.Button("Upload")
69
  upload_button.click(upload_docs, inputs=doc_upload, outputs=gr.Textbox())
 
70
  with gr.Column():
71
  chat_input = gr.Textbox(label="Ask a question:")
72
  chat_output = gr.Textbox(label="Answer:")
73
  chat_button = gr.Button("Send")
74
  chat_button.click(chat, inputs=chat_input, outputs=chat_output)
75
 
76
+ demo.launch()