Wajahat698 commited on
Commit
cd6d88e
·
verified ·
1 Parent(s): d54902f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +82 -30
app.py CHANGED
@@ -1,7 +1,8 @@
1
-
2
  import logging
3
  import os
 
4
  from dotenv import load_dotenv
 
5
  import openai
6
  from langchain_openai import ChatOpenAI
7
  from langchain_community.vectorstores import FAISS
@@ -9,12 +10,13 @@ from langchain_openai import OpenAIEmbeddings
9
  from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
10
  from langchain.agents import tool, AgentExecutor
11
  from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
12
- from langchain.agents.format_scratchpad.openai_tools import format_to_openai_tool_messages
 
 
13
  from langchain_core.messages import AIMessage, HumanMessage
14
  from langchain_community.document_loaders import TextLoader
15
  from langchain_text_splitters import CharacterTextSplitter
16
  import serpapi
17
- import streamlit as st
18
 
19
  # Initialize logging
20
  logging.basicConfig(level=logging.INFO)
@@ -28,12 +30,19 @@ openai_api_key = os.getenv("OPENAI_API_KEY")
28
  serper_api_key = os.getenv("SERPER_API_KEY")
29
 
30
  if not openai_api_key or not serper_api_key:
31
- st.error("API keys for OpenAI and SERPER must be set in the .env file.")
32
- logger.error("API keys for OpenAI and SERPER must be set in the .env file.")
33
- st.stop()
 
34
 
35
  # Initialize OpenAI client
36
- openai.api_key = openai_api_key
 
 
 
 
 
 
37
 
38
  # Load knowledge base
39
  def load_knowledge_base():
@@ -41,11 +50,12 @@ def load_knowledge_base():
41
  loader = TextLoader("./data_source/time_to_rethink_trust_book.md")
42
  documents = loader.load()
43
  text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
44
- return text_splitter.split_documents(documents)
 
45
  except Exception as e:
46
  logger.error(f"Error loading knowledge base: {e}")
47
- st.error("Error loading knowledge base.")
48
- st.stop()
49
 
50
  knowledge_base = load_knowledge_base()
51
 
@@ -55,30 +65,39 @@ try:
55
  db = FAISS.from_documents(knowledge_base, embeddings)
56
  except Exception as e:
57
  logger.error(f"Error initializing FAISS index: {e}")
58
- st.error("Error initializing FAISS index.")
59
- st.stop()
60
 
61
  # Define search function for knowledge base
62
  def search_knowledge_base(query):
63
  try:
64
- return db.similarity_search(query)
 
65
  except Exception as e:
66
  logger.error(f"Error searching knowledge base: {e}")
67
  return ["Error occurred during knowledge base search"]
68
 
 
69
  # SERPER API Google Search function
70
  def google_search(query):
71
  try:
72
  search_client = serpapi.Client(api_key=serper_api_key)
73
- results = search_client.search({"engine": "google", "q": query})
74
- return [result["snippet"] for result in results.get("organic_results", [])]
 
 
 
 
 
 
75
  except requests.exceptions.HTTPError as http_err:
76
  logger.error(f"HTTP error occurred: {http_err}")
77
  return ["HTTP error occurred during Google search"]
78
  except Exception as e:
79
- logger.error(f"Error during Google search: {e}")
80
  return ["Error occurred during Google search"]
81
 
 
82
  # RAG response function
83
  def rag_response(query):
84
  try:
@@ -92,6 +111,7 @@ def rag_response(query):
92
  logger.error(f"Error generating RAG response: {e}")
93
  return "Error occurred during RAG response generation"
94
 
 
95
  # Define tools using LangChain's `tool` decorator
96
  @tool
97
  def knowledge_base_tool(query: str):
@@ -140,54 +160,86 @@ prompt_template = ChatPromptTemplate.from_messages(
140
 
141
  # Create Langchain Agent with specific model and temperature
142
  try:
143
- llm = ChatOpenAI(model="gpt-4o", temperature=0.5)
144
  llm_with_tools = llm.bind_tools(tools)
145
  except Exception as e:
146
  logger.error(f"Error creating Langchain Agent: {e}")
147
- st.error("Error creating Langchain Agent.")
148
- st.stop()
149
 
150
  # Define the agent pipeline to handle the conversation flow
151
  try:
152
  agent = (
153
  {
154
  "input": lambda x: x["input"],
155
- "agent_scratchpad": lambda x: format_to_openai_tool_messages(x["intermediate_steps"]),
 
 
156
  "chat_history": lambda x: x["chat_history"],
157
  }
158
  | prompt_template
159
  | llm_with_tools
160
  | OpenAIToolsAgentOutputParser()
161
  )
 
 
162
  agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
163
  except Exception as e:
164
  logger.error(f"Error defining agent pipeline: {e}")
165
- st.error("Error defining agent pipeline.")
166
- st.stop()
167
 
168
  # Initialize chat history
169
  chat_history = []
170
 
 
171
  def chatbot_response(message, history):
172
  try:
 
173
  output = agent_executor.invoke({"input": message, "chat_history": chat_history})
 
 
174
  chat_history.extend(
175
  [
176
  HumanMessage(content=message),
177
  AIMessage(content=output["output"]),
178
  ]
179
  )
 
180
  return output["output"]
181
  except Exception as e:
182
  logger.error(f"Error generating chatbot response: {e}")
183
  return "Error occurred during response generation"
184
 
185
- # Streamlit app
186
 
187
- user_input = st.text_input("You:", "")
188
- if st.button("Submit"):
189
- if user_input:
190
- response = chatbot_response(user_input, chat_history)
191
- st.write("AI:", response)
192
- else:
193
- st.warning("Please enter a message.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import logging
2
  import os
3
+ import requests
4
  from dotenv import load_dotenv
5
+ import gradio as gr
6
  import openai
7
  from langchain_openai import ChatOpenAI
8
  from langchain_community.vectorstores import FAISS
 
10
  from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
11
  from langchain.agents import tool, AgentExecutor
12
  from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
13
+ from langchain.agents.format_scratchpad.openai_tools import (
14
+ format_to_openai_tool_messages,
15
+ )
16
  from langchain_core.messages import AIMessage, HumanMessage
17
  from langchain_community.document_loaders import TextLoader
18
  from langchain_text_splitters import CharacterTextSplitter
19
  import serpapi
 
20
 
21
  # Initialize logging
22
  logging.basicConfig(level=logging.INFO)
 
30
  serper_api_key = os.getenv("SERPER_API_KEY")
31
 
32
# Fail fast at startup if either required key is missing from the environment.
if not openai_api_key or not serper_api_key:
    logger.error("API keys are not set properly.")
    raise ValueError("API keys for OpenAI and SERPER must be set in the .env file.")
else:
    logger.info("API keys loaded successfully.")

# Initialize OpenAI client.
# A plain attribute assignment cannot raise, so no try/except is needed here.
openai.api_key = openai_api_key
logger.info("OpenAI client initialized successfully.")
46
 
47
  # Load knowledge base
48
def load_knowledge_base():
    """Load the trust-book markdown file and split it into chunks.

    Reads ``./data_source/time_to_rethink_trust_book.md`` and splits it into
    ~1000-character, non-overlapping chunks for downstream FAISS indexing.

    Returns:
        list: the split document chunks.

    Raises:
        Exception: re-raised after logging if loading or splitting fails.
    """
    try:
        loader = TextLoader("./data_source/time_to_rethink_trust_book.md")
        documents = loader.load()
        text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
        return text_splitter.split_documents(documents)
    except Exception as e:
        logger.error(f"Error loading knowledge base: {e}")
        # Bare raise preserves the original traceback; `raise e` would
        # restart it from this frame.
        raise
59
 
60
  knowledge_base = load_knowledge_base()
61
 
 
65
  db = FAISS.from_documents(knowledge_base, embeddings)
66
  except Exception as e:
67
  logger.error(f"Error initializing FAISS index: {e}")
68
+ raise e
69
+
70
 
71
  # Define search function for knowledge base
72
def search_knowledge_base(query):
    """Run a similarity search over the FAISS index for *query*.

    Returns the matching documents on success, or a one-element list
    containing an error message if the search fails.
    """
    try:
        return db.similarity_search(query)
    except Exception as exc:
        logger.error(f"Error searching knowledge base: {exc}")
        return ["Error occurred during knowledge base search"]
79
 
80
+
81
  # SERPER API Google Search function
82
def google_search(query):
    """Search Google via the serpapi client and return result snippets.

    Args:
        query: the search string.

    Returns:
        list[str]: one snippet per organic result that has one, or a
        one-element error-message list on failure.
    """
    try:
        # NOTE(review): the key is read from SERPER_API_KEY but handed to the
        # serpapi client — confirm which search service is actually intended.
        search_client = serpapi.Client(api_key=serper_api_key)
        results = search_client.search({"engine": "google", "q": query})
        # Some organic results carry no "snippet" key; skip those instead of
        # letting a KeyError collapse the whole search into the generic error.
        return [
            result["snippet"]
            for result in results.get("organic_results", [])
            if "snippet" in result
        ]
    except requests.exceptions.HTTPError as http_err:
        logger.error(f"HTTP error occurred: {http_err}")
        return ["HTTP error occurred during Google search"]
    except Exception as e:
        logger.error(f"General Error: {e}")
        return ["Error occurred during Google search"]
99
 
100
+
101
  # RAG response function
102
  def rag_response(query):
103
  try:
 
111
  logger.error(f"Error generating RAG response: {e}")
112
  return "Error occurred during RAG response generation"
113
 
114
+
115
  # Define tools using LangChain's `tool` decorator
116
  @tool
117
  def knowledge_base_tool(query: str):
 
160
 
161
  # Create Langchain Agent with specific model and temperature
162
  try:
163
+ llm = ChatOpenAI(model="gpt-4o", temperature=0.5) # Set temperature to 0.5
164
  llm_with_tools = llm.bind_tools(tools)
165
  except Exception as e:
166
  logger.error(f"Error creating Langchain Agent: {e}")
 
 
167
 
168
  # Define the agent pipeline to handle the conversation flow
169
  try:
170
  agent = (
171
  {
172
  "input": lambda x: x["input"],
173
+ "agent_scratchpad": lambda x: format_to_openai_tool_messages(
174
+ x["intermediate_steps"]
175
+ ),
176
  "chat_history": lambda x: x["chat_history"],
177
  }
178
  | prompt_template
179
  | llm_with_tools
180
  | OpenAIToolsAgentOutputParser()
181
  )
182
+
183
+ # Instantiate an AgentExecutor to execute the defined agent pipeline
184
  agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
185
  except Exception as e:
186
  logger.error(f"Error defining agent pipeline: {e}")
 
 
187
 
188
  # Initialize chat history
189
  chat_history = []
190
 
191
+
192
def chatbot_response(message, history):
    """Generate the agent's reply to *message* and record the exchange.

    NOTE(review): the *history* argument (supplied by the chat UI) is ignored;
    the module-level ``chat_history`` list is used instead — confirm this is
    intentional.

    Returns the agent's output string, or an error message on failure.
    """
    try:
        # Run the agent pipeline with the accumulated conversation context.
        result = agent_executor.invoke(
            {"input": message, "chat_history": chat_history}
        )
        reply = result["output"]

        # Persist both sides of the turn for subsequent calls.
        chat_history.append(HumanMessage(content=message))
        chat_history.append(AIMessage(content=reply))

        return reply
    except Exception as exc:
        logger.error(f"Error generating chatbot response: {exc}")
        return "Error occurred during response generation"
209
 
 
210
 
211
# Gradio interface: a single Blocks app hosting a ChatInterface backed by the agent.
with gr.Blocks() as demo:
    gr.Markdown(
        "<span style='font-size:20px; font-weight:bold;'>Instant Insight-2-Action</span>",
        visible=True,
    )

    # Create the chat components INSIDE the Blocks context so they are
    # attached to (and rendered by) this app; created outside, they float
    # free of the layout and are never shown.
    submit_button = gr.Button("Submit")
    bot = gr.Chatbot()

    chatbot = gr.ChatInterface(
        fn=chatbot_response,
        stop_btn=None,
        retry_btn=None,
        undo_btn=None,
        clear_btn=None,
        submit_btn=submit_button,
        chatbot=bot,
    )

# Launch the Gradio app; bind to all interfaces so it is reachable in a container.
try:
    demo.launch(server_name="0.0.0.0")
except Exception as e:
    logger.error(f"Error launching Gradio app: {e}")
    raise  # preserve the original traceback