PriyaMishra committed on
Commit 5278dab · verified · 1 Parent(s): ef9716d

Update app.py

Files changed (1)
  1. app.py +101 -124
app.py CHANGED
@@ -2,6 +2,18 @@ import gradio as gr
 import os
 api_token = os.getenv("HF_TOKEN")
 
+from langchain_community.vectorstores import FAISS
+from langchain_community.document_loaders import PyPDFLoader, TextLoader
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain.chains import ConversationalRetrievalChain
+from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_community.llms import HuggingFaceEndpoint
+from langchain.memory import ConversationBufferMemory
+from langchain.prompts import PromptTemplate
+
+list_llm = ["meta-llama/Meta-Llama-3-8B-Instruct", "mistralai/Mistral-7B-Instruct-v0.2"]
+list_llm_simple = [os.path.basename(llm) for llm in list_llm]
+
 # Custom prompt template
 CUSTOM_PROMPT_TEMPLATE = """
 **Response Instructions:**
@@ -22,33 +34,22 @@ Chat History: {chat_history}
 
 Craft the response as a seamless, thorough, and authoritative explanation that naturally integrates all aspects of the query.
 """
-from langchain_community.vectorstores import FAISS
-from langchain_community.document_loaders import PyPDFLoader
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain_community.vectorstores import Chroma
-from langchain.chains import ConversationalRetrievalChain
-from langchain_community.embeddings import HuggingFaceEmbeddings
-from langchain_community.llms import HuggingFacePipeline
-from langchain.chains import ConversationChain
-from langchain.memory import ConversationBufferMemory
-from langchain_community.llms import HuggingFaceEndpoint
-import torch
 
-list_llm = ["meta-llama/Meta-Llama-3-8B-Instruct", "mistralai/Mistral-7B-Instruct-v0.2"]
-list_llm_simple = [os.path.basename(llm) for llm in list_llm]
-
-# Load and split PDF document
+# Load and split documents
 def load_doc(list_file_path):
-    # Processing for one document only
-    # loader = PyPDFLoader(file_path)
-    # pages = loader.load()
-    loaders = [PyPDFLoader(x) for x in list_file_path]
     pages = []
-    for loader in loaders:
+    for file_path in list_file_path:
+        if file_path.endswith('.pdf'):
+            loader = PyPDFLoader(file_path)
+        elif file_path.endswith('.txt'):
+            loader = TextLoader(file_path)
+        else:
+            continue
         pages.extend(loader.load())
+
     text_splitter = RecursiveCharacterTextSplitter(
-        chunk_size = 1024,
-        chunk_overlap = 64
+        chunk_size=1024,
+        chunk_overlap=64
     )
     doc_splits = text_splitter.split_documents(pages)
     return doc_splits
@@ -59,25 +60,15 @@ def create_db(splits):
     vectordb = FAISS.from_documents(splits, embeddings)
     return vectordb
 
-
-# Initialize langchain LLM chain
+# Initialize langchain LLM chain with custom prompt
 def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
-    if llm_model == "meta-llama/Meta-Llama-3-8B-Instruct":
-        llm = HuggingFaceEndpoint(
-            repo_id=llm_model,
-            huggingfacehub_api_token = api_token,
-            temperature = temperature,
-            max_new_tokens = max_tokens,
-            top_k = top_k,
-        )
-    else:
-        llm = HuggingFaceEndpoint(
-            huggingfacehub_api_token = api_token,
-            repo_id=llm_model,
-            temperature = temperature,
-            max_new_tokens = max_tokens,
-            top_k = top_k,
-        )
+    llm = HuggingFaceEndpoint(
+        repo_id=llm_model,
+        huggingfacehub_api_token=api_token,
+        temperature=temperature,
+        max_new_tokens=max_tokens,
+        top_k=top_k,
+    )
 
     memory = ConversationBufferMemory(
         memory_key="chat_history",
@@ -85,157 +76,143 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, pr
         return_messages=True
     )
 
-    # Create custom prompt
-    QA_PROMPT = PromptTemplate(
+    # Create custom prompt
+    custom_prompt = PromptTemplate(
         template=CUSTOM_PROMPT_TEMPLATE,
         input_variables=["context", "question", "chat_history"]
     )
 
-    retriever = vector_db.as_retriever(search_kwargs={"k": 5})
+    retriever = vector_db.as_retriever()
     qa_chain = ConversationalRetrievalChain.from_llm(
         llm,
         retriever=retriever,
+        chain_type="stuff",
         memory=memory,
-        combine_docs_chain_kwargs={"prompt": QA_PROMPT},
         return_source_documents=True,
-        verbose=False
+        verbose=False,
+        combine_docs_chain_kwargs={"prompt": custom_prompt}
     )
     return qa_chain
 
 # Initialize database
 def initialize_database(list_file_obj, progress=gr.Progress()):
-    # Create a list of documents (when valid)
     list_file_path = [x.name for x in list_file_obj if x is not None]
-    # Load document and create splits
     doc_splits = load_doc(list_file_path)
-    # Create or load vector database
     vector_db = create_db(doc_splits)
     return vector_db, "Database created!"
 
 # Initialize LLM
 def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
-    # print("llm_option",llm_option)
     llm_name = list_llm[llm_option]
-    print("llm_name: ",llm_name)
     qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress)
     return qa_chain, "QA chain initialized. Chatbot is ready!"
 
-
 def format_chat_history(message, chat_history):
     formatted_chat_history = []
     for user_message, bot_message in chat_history:
         formatted_chat_history.append(f"User: {user_message}")
         formatted_chat_history.append(f"Assistant: {bot_message}")
     return formatted_chat_history
-
 
 def conversation(qa_chain, message, history):
     formatted_chat_history = format_chat_history(message, history)
-    # Generate response using QA chain
    response = qa_chain.invoke({"question": message, "chat_history": formatted_chat_history})
     response_answer = response["answer"]
-    if response_answer.find("Helpful Answer:") != -1:
-        response_answer = response_answer.split("Helpful Answer:")[-1]
     response_sources = response["source_documents"]
-    response_source1 = response_sources[0].page_content.strip()
-    response_source2 = response_sources[1].page_content.strip()
-    response_source3 = response_sources[2].page_content.strip()
-    # Langchain sources are zero-based
-    response_source1_page = response_sources[0].metadata["page"] + 1
-    response_source2_page = response_sources[1].metadata["page"] + 1
-    response_source3_page = response_sources[2].metadata["page"] + 1
-    # Append user message and response to chat history
-    new_history = history + [(message, response_answer)]
-    return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
 
+    # Get sources (with fallback for when there are fewer than 3 sources)
+    sources_content = []
+    sources_pages = []
+    for i in range(3):
+        if i < len(response_sources):
+            sources_content.append(response_sources[i].page_content.strip())
+            sources_pages.append(response_sources[i].metadata.get("page", 0) + 1)
+        else:
+            sources_content.append("")
+            sources_pages.append(0)
+
+    new_history = history + [(message, response_answer)]
+    return (qa_chain, gr.update(value=""), new_history,
+            sources_content[0], sources_pages[0],
+            sources_content[1], sources_pages[1],
+            sources_content[2], sources_pages[2])
-
-def upload_file(file_obj):
-    list_file_path = []
-    for idx, file in enumerate(file_obj):
-        file_path = file_obj.name
-        list_file_path.append(file_path)
-    return list_file_path
-
 
 def demo():
-    # with gr.Blocks(theme=gr.themes.Default(primary_hue="sky")) as demo:
-    with gr.Blocks(theme=gr.themes.Default(primary_hue="red", secondary_hue="pink", neutral_hue = "sky")) as demo:
+    with gr.Blocks(theme=gr.themes.Default(primary_hue="red", secondary_hue="pink", neutral_hue="sky")) as demo:
         vector_db = gr.State()
         qa_chain = gr.State()
-        gr.HTML("<center><h1>RAG PDF chatbot</h1><center>")
-        gr.Markdown("""<b>Query your PDF documents!</b> This AI agent is designed to perform retrieval augmented generation (RAG) on PDF documents. The app is hosted on Hugging Face Hub for the sole purpose of demonstration. \
+        gr.HTML("<center><h1>RAG Document Chatbot</h1></center>")
+        gr.Markdown("""<b>Query your documents!</b> This AI agent performs retrieval augmented generation (RAG) on PDF and TXT documents.
         <b>Please do not upload confidential documents.</b>
         """)
+
         with gr.Row():
-            with gr.Column(scale = 86):
-                gr.Markdown("<b>Step 1 - Upload PDF documents and Initialize RAG pipeline</b>")
+            with gr.Column(scale=86):
+                gr.Markdown("<b>Step 1 - Upload Documents and Initialize RAG pipeline</b>")
                 with gr.Row():
-                    document = gr.Files(height=300, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload PDF documents")
+                    document = gr.Files(height=300, file_count="multiple",
+                                        file_types=["pdf", "txt"], interactive=True,
+                                        label="Upload PDF or TXT documents")
                 with gr.Row():
                     db_btn = gr.Button("Create vector database")
                 with gr.Row():
-                    db_progress = gr.Textbox(value="Not initialized", show_label=False) # label="Vector database status",
-                gr.Markdown("<style>body { font-size: 16px; }</style><b>Select Large Language Model (LLM) and input parameters</b>")
+                    db_progress = gr.Textbox(value="Not initialized", show_label=False)
+
+                gr.Markdown("<b>Select Large Language Model (LLM) and input parameters</b>")
                 with gr.Row():
-                    llm_btn = gr.Radio(list_llm_simple, label="Available LLMs", value = list_llm_simple[0], type="index") # info="Select LLM", show_label=False
+                    llm_btn = gr.Radio(list_llm_simple, label="Available LLMs",
+                                       value=list_llm_simple[0], type="index")
                 with gr.Row():
                     with gr.Accordion("LLM input parameters", open=False):
                         with gr.Row():
-                            slider_temperature = gr.Slider(minimum = 0.01, maximum = 1.0, value=0.5, step=0.1, label="Temperature", info="Controls randomness in token generation", interactive=True)
+                            slider_temperature = gr.Slider(minimum=0.01, maximum=1.0, value=0.5,
+                                                           step=0.1, label="Temperature")
                         with gr.Row():
-                            slider_maxtokens = gr.Slider(minimum = 128, maximum = 9192, value=4096, step=128, label="Max New Tokens", info="Maximum number of tokens to be generated",interactive=True)
+                            slider_maxtokens = gr.Slider(minimum=128, maximum=9192, value=4096,
+                                                         step=128, label="Max New Tokens")
                         with gr.Row():
-                            slider_topk = gr.Slider(minimum = 1, maximum = 10, value=3, step=1, label="top-k", info="Number of tokens to select the next token from", interactive=True)
+                            slider_topk = gr.Slider(minimum=1, maximum=10, value=3,
+                                                    step=1, label="top-k")
                 with gr.Row():
                     qachain_btn = gr.Button("Initialize Question Answering Chatbot")
                 with gr.Row():
-                    llm_progress = gr.Textbox(value="Not initialized", show_label=False) # label="Chatbot status",
 
-            with gr.Column(scale = 200):
+                    llm_progress = gr.Textbox(value="Not initialized", show_label=False)
 
+            with gr.Column(scale=200):
                 gr.Markdown("<b>Step 2 - Chat with your Document</b>")
                 chatbot = gr.Chatbot(height=505)
-                with gr.Accordion("Relevent context from the source document", open=False):
-                    with gr.Row():
-                        doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
-                        source1_page = gr.Number(label="Page", scale=1)
-                    with gr.Row():
-                        doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
-                        source2_page = gr.Number(label="Page", scale=1)
-                    with gr.Row():
-                        doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
-                        source3_page = gr.Number(label="Page", scale=1)
+                with gr.Accordion("Relevant context from the source document", open=False):
+                    # Build the three reference rows in a loop, collecting the widgets
+                    # in lists so the event handlers below can use them directly
+                    doc_sources, source_pages = [], []
+                    for i in range(1, 4):
+                        with gr.Row():
+                            doc_sources.append(gr.Textbox(label=f"Reference {i}", lines=2,
+                                                          container=True, scale=20))
+                            source_pages.append(gr.Number(label="Page", scale=1))
                 with gr.Row():
                     msg = gr.Textbox(placeholder="Ask a question", container=True)
                 with gr.Row():
                     submit_btn = gr.Button("Submit")
                     clear_btn = gr.ClearButton([msg, chatbot], value="Clear")
-
-        # Preprocessing events
-        db_btn.click(initialize_database, \
-            inputs=[document], \
-            outputs=[vector_db, db_progress])
-        qachain_btn.click(initialize_LLM, \
-            inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db], \
-            outputs=[qa_chain, llm_progress]).then(lambda:[None,"",0,"",0,"",0], \
-            inputs=None, \
-            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
-            queue=False)
-
-        # Chatbot events
-        msg.submit(conversation, \
-            inputs=[qa_chain, msg, chatbot], \
-            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
-            queue=False)
-        submit_btn.click(conversation, \
-            inputs=[qa_chain, msg, chatbot], \
-            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
-            queue=False)
-        clear_btn.click(lambda:[None,"",0,"",0,"",0], \
-            inputs=None, \
-            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
-            queue=False)
+
+        # Event handlers
+        # Interleaved (text, page) widget list, matching the order of the values
+        # returned by conversation()
+        source_outputs = [w for pair in zip(doc_sources, source_pages) for w in pair]
+        db_btn.click(initialize_database, inputs=[document], outputs=[vector_db, db_progress])
+        qachain_btn.click(initialize_LLM,
+                          inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db],
+                          outputs=[qa_chain, llm_progress]).then(
+            lambda: [None, "", 0, "", 0, "", 0],
+            outputs=[chatbot] + source_outputs,
+            queue=False)
+
+        msg.submit(conversation, inputs=[qa_chain, msg, chatbot],
+                   outputs=[qa_chain, msg, chatbot] + source_outputs,
+                   queue=False)
+        submit_btn.click(conversation, inputs=[qa_chain, msg, chatbot],
+                         outputs=[qa_chain, msg, chatbot] + source_outputs,
+                         queue=False)
+        clear_btn.click(lambda: [None, "", 0, "", 0, "", 0],
+                        outputs=[chatbot] + source_outputs,
+                        queue=False)
+
     demo.queue().launch(debug=True)
 
-
 if __name__ == "__main__":
     demo()
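
A minimal sketch of driving the updated pipeline headlessly, for quick verification outside the Gradio UI. This is not part of the commit: "sample.pdf" and "notes.txt" are hypothetical placeholder files, and it assumes HF_TOKEN is set in the environment:

# Headless smoke test -- a sketch, not part of the commit.
# "sample.pdf" and "notes.txt" are hypothetical local files; HF_TOKEN must be set.
from app import load_doc, create_db, initialize_llmchain, list_llm

splits = load_doc(["sample.pdf", "notes.txt"])  # mixed PDF/TXT loading added in this commit
vector_db = create_db(splits)
qa_chain = initialize_llmchain(list_llm[0], temperature=0.5, max_tokens=4096,
                               top_k=3, vector_db=vector_db)
result = qa_chain.invoke({"question": "What are these documents about?", "chat_history": []})
print(result["answer"])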