Wajahat698 committed on
Commit
764528b
·
verified ·
1 Parent(s): 588465b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -115
app.py CHANGED
@@ -1,4 +1,4 @@
1
- import logging
2
  import os
3
  import requests
4
  from dotenv import load_dotenv
@@ -28,102 +28,51 @@ openai_api_key = os.getenv("OPENAI_API_KEY")
28
  serper_api_key = os.getenv("SERPER_API_KEY")
29
 
30
  if not openai_api_key or not serper_api_key:
31
- logger.error("API keys are not set properly.")
32
  st.error("API keys for OpenAI and SERPER must be set in the .env file.")
33
  st.stop()
34
- else:
35
- logger.info("API keys loaded successfully.")
36
 
37
  # Initialize OpenAI client
38
- try:
39
- openai.api_key = openai_api_key
40
- logger.info("OpenAI client initialized successfully.")
41
- except Exception as e:
42
- logger.error(f"Error initializing OpenAI client: {e}")
43
- st.error(f"Error initializing OpenAI client: {e}")
44
- st.stop()
45
 
46
  # Load knowledge base
47
  def load_knowledge_base():
48
- try:
49
- loader = TextLoader("./data_source/time_to_rethink_trust_book.md")
50
- documents = loader.load()
51
- text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
52
- docs = text_splitter.split_documents(documents)
53
- return docs
54
- except Exception as e:
55
- logger.error(f"Error loading knowledge base: {e}")
56
- st.error(f"Error loading knowledge base: {e}")
57
- st.stop()
58
 
59
  knowledge_base = load_knowledge_base()
60
 
61
  # Initialize embeddings and FAISS index
62
- try:
63
- embeddings = OpenAIEmbeddings()
64
- db = FAISS.from_documents(knowledge_base, embeddings)
65
- except Exception as e:
66
- logger.error(f"Error initializing FAISS index: {e}")
67
- st.error(f"Error initializing FAISS index: {e}")
68
- st.stop()
69
 
70
  # Define search function for knowledge base
71
  def search_knowledge_base(query):
72
- try:
73
- output = db.similarity_search(query)
74
- return output
75
- except Exception as e:
76
- logger.error(f"Error searching knowledge base: {e}")
77
- return ["Error occurred during knowledge base search"]
78
 
79
  # SERPER API Google Search function
80
  def google_search(query):
81
- try:
82
- search_client = serpapi.Client(api_key=serper_api_key)
83
- results = search_client.search({"engine": "google", "q": query})
84
- snippets = [result["snippet"] for result in results.get("organic_results", [])]
85
- return snippets
86
- except requests.exceptions.HTTPError as http_err:
87
- logger.error(f"HTTP error occurred: {http_err}")
88
- return ["HTTP error occurred during Google search"]
89
- except Exception as e:
90
- logger.error(f"General Error: {e}")
91
- return ["Error occurred during Google search"]
92
 
93
  # RAG response function
94
  def rag_response(query):
95
- try:
96
- retrieved_docs = search_knowledge_base(query)
97
- context = "\n".join(doc.page_content for doc in retrieved_docs)
98
- prompt = f"Context:\n{context}\n\nQuestion: {query}\nAnswer:"
99
- llm = ChatOpenAI(model="gpt-4o", temperature=0.5, api_key=openai_api_key)
100
- response = llm.invoke(prompt)
101
- return response.content
102
- except Exception as e:
103
- logger.error(f"Error generating RAG response: {e}")
104
- return "Error occurred during RAG response generation"
105
 
106
  # Define tools using LangChain's `tool` decorator
107
  @tool
108
  def knowledge_base_tool(query: str):
109
- """
110
- Tool function to query the knowledge base and retrieve a response.
111
- Args:
112
- query (str): The query to search the knowledge base.
113
- Returns:
114
- str: The response retrieved from the knowledge base.
115
- """
116
  return rag_response(query)
117
 
118
  @tool
119
  def google_search_tool(query: str):
120
- """
121
- Tool function to perform a Google search using the SERPER API.
122
- Args:
123
- query (str): The query to search on Google.
124
- Returns:
125
- list: List of snippets extracted from search results.
126
- """
127
  return google_search(query)
128
 
129
  tools = [knowledge_base_tool, google_search_tool]
@@ -144,62 +93,39 @@ prompt_template = ChatPromptTemplate.from_messages(
144
  ]
145
  )
146
 
147
- # Create Langchain Agent with specific model and temperature
148
- try:
149
- llm = ChatOpenAI(model="gpt-4o", temperature=0.5) # Set temperature to 0.5
150
- llm_with_tools = llm.bind_tools(tools)
151
- except Exception as e:
152
- logger.error(f"Error creating Langchain Agent: {e}")
153
- st.error(f"Error creating Langchain Agent: {e}")
154
- st.stop()
155
-
156
- # Define the agent pipeline to handle the conversation flow
157
- try:
158
- agent = (
159
- {
160
- "input": lambda x: x["input"],
161
- "agent_scratchpad": lambda x: format_to_openai_tool_messages(x["intermediate_steps"]),
162
- "chat_history": lambda x: x["chat_history"],
163
- }
164
- | prompt_template
165
- | llm_with_tools
166
- | OpenAIToolsAgentOutputParser()
167
- )
168
 
169
- agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
170
- except Exception as e:
171
- logger.error(f"Error defining agent pipeline: {e}")
172
- st.error(f"Error defining agent pipeline: {e}")
173
- st.stop()
174
 
175
  # Initialize chat history
176
  chat_history = []
177
 
178
  def chatbot_response(message, history):
179
- try:
180
- # Generate response using the agent executor
181
- output = agent_executor.invoke({"input": message, "chat_history": chat_history})
182
-
183
- # Save the interaction context
184
- chat_history.extend(
185
- [
186
- HumanMessage(content=message),
187
- AIMessage(content=output["output"]),
188
- ]
189
- )
190
-
191
- return output["output"]
192
- except Exception as e:
193
- logger.error(f"Error generating chatbot response: {e}")
194
- return "Error occurred during response generation"
195
 
196
  # Streamlit app
 
197
 
198
-
199
- # Create input field for user message
200
  user_input = st.text_input("You:", "")
201
-
202
- # Create a button for submitting the message
203
  if st.button("Submit"):
204
  if user_input:
205
  response = chatbot_response(user_input, chat_history)
 
1
# Standard-library and third-party imports.
# Fix: the previous edit truncated `import logging` to `mport logging`,
# which is a SyntaxError and prevents the whole app from loading.
import logging
import os

import requests
from dotenv import load_dotenv
 
28
# Read the search credential from the environment (populated via load_dotenv).
serper_api_key = os.getenv("SERPER_API_KEY")

# Halt the Streamlit page early when either credential is absent.
if not (openai_api_key and serper_api_key):
    st.error("API keys for OpenAI and SERPER must be set in the .env file.")
    st.stop()

# Initialize OpenAI client with the loaded key.
openai.api_key = openai_api_key
 
 
 
 
 
 
36
 
37
  # Load knowledge base
38
def load_knowledge_base():
    """Load the trust-book markdown and split it into retrieval chunks.

    Returns:
        list: Document chunks of at most 1000 characters (no overlap).
    """
    raw_docs = TextLoader("./data_source/time_to_rethink_trust_book.md").load()
    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    return splitter.split_documents(raw_docs)
 
 
 
 
 
 
43
 
44
# Build the vector index once at module import time.
knowledge_base = load_knowledge_base()

# Embed every chunk with OpenAI embeddings and index them in FAISS.
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(knowledge_base, embeddings)
 
 
 
 
 
49
 
50
  # Define search function for knowledge base
51
def search_knowledge_base(query):
    """Return the FAISS documents most similar to *query*."""
    matches = db.similarity_search(query)
    return matches
 
 
 
 
 
53
 
54
  # SERPER API Google Search function
55
def google_search(query):
    """Run a Google search through the serpapi client and return snippets.

    Args:
        query (str): The search query.

    Returns:
        list[str]: The "snippet" text of each organic result. Results that
        carry no "snippet" key are skipped instead of raising KeyError
        (some organic results omit it).
    """
    # NOTE(review): the key comes from SERPER_API_KEY but the client is
    # serpapi — these are different providers; confirm which service is intended.
    search_client = serpapi.Client(api_key=serper_api_key)
    results = search_client.search({"engine": "google", "q": query})
    return [
        result["snippet"]
        for result in results.get("organic_results", [])
        if "snippet" in result
    ]
 
 
 
 
 
 
 
 
59
 
60
  # RAG response function
61
def rag_response(query):
    """Answer *query* with gpt-4o, grounding the prompt in retrieved context.

    Args:
        query (str): The user question.

    Returns:
        str: The model's answer text.
    """
    docs = search_knowledge_base(query)
    context = "\n".join(d.page_content for d in docs)
    llm = ChatOpenAI(model="gpt-4o", temperature=0.5, api_key=openai_api_key)
    answer = llm.invoke(f"Context:\n{context}\n\nQuestion: {query}\nAnswer:")
    return answer.content
 
 
 
 
68
 
69
  # Define tools using LangChain's `tool` decorator
70
@tool
def knowledge_base_tool(query: str):
    """Query the knowledge base and return a RAG-generated answer.

    Args:
        query (str): The query to search the knowledge base.

    Returns:
        str: The response retrieved from the knowledge base.
    """
    # Fix: LangChain's @tool requires a docstring to build the tool's
    # description; removing it (as this diff did) raises at import time.
    return rag_response(query)
73
 
74
@tool
def google_search_tool(query: str):
    """Perform a Google search using the SERPER API.

    Args:
        query (str): The query to search on Google.

    Returns:
        list: List of snippets extracted from search results.
    """
    # Fix: LangChain's @tool requires a docstring to build the tool's
    # description; removing it (as this diff did) raises at import time.
    return google_search(query)

# Tools exposed to the agent.
tools = [knowledge_base_tool, google_search_tool]
 
93
  ]
94
  )
95
 
96
# Tool-calling LLM for the agent (temperature 0.5, gpt-4o).
llm = ChatOpenAI(model="gpt-4o", temperature=0.5)
llm_with_tools = llm.bind_tools(tools)

# LCEL pipeline: map the invoke() payload into prompt variables, render the
# prompt, call the tool-bound model, then parse tool calls / final answer.
agent = (
    {
        "input": lambda x: x["input"],
        "agent_scratchpad": lambda x: format_to_openai_tool_messages(x["intermediate_steps"]),
        "chat_history": lambda x: x["chat_history"],
    }
    | prompt_template
    | llm_with_tools
    | OpenAIToolsAgentOutputParser()
)

# Executor loops the agent until it produces a final answer.
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
 
 
 
 
111
 
112
# Conversation memory, shared by every call to chatbot_response.
chat_history = []

def chatbot_response(message, history):
    """Run the agent on *message*, record the exchange, and return the reply.

    Args:
        message (str): The user's latest message.
        history: Unused; the module-level chat_history is the actual memory.

    Returns:
        str: The agent's answer.
    """
    result = agent_executor.invoke({"input": message, "chat_history": chat_history})
    reply = result["output"]
    # Persist both sides of the turn for subsequent invocations.
    chat_history.append(HumanMessage(content=message))
    chat_history.append(AIMessage(content=reply))
    return reply
 
 
 
 
 
 
 
 
124
 
125
# Streamlit app
st.title("Chatbot")

user_input = st.text_input("You:", "")
# Only invoke the agent when the button is pressed with non-empty input.
if st.button("Submit") and user_input:
    response = chatbot_response(user_input, chat_history)
    # NOTE(review): response is computed but never rendered — consider
    # st.write(response) so the user actually sees the answer.