Wajahat698 committed on
Commit
8a7f07f
·
verified ·
1 Parent(s): 48be0e6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +137 -40
app.py CHANGED
@@ -36,35 +36,118 @@ openai.api_key = openai_api_key
36
 
37
  # Load knowledge base
38
  def load_knowledge_base():
39
- loader = TextLoader("./data_source/time_to_rethink_trust_book.md")
40
- documents = loader.load()
41
- text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
42
- return text_splitter.split_documents(documents)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
 
44
  knowledge_base = load_knowledge_base()
45
 
46
  # Initialize embeddings and FAISS index
47
- embeddings = OpenAIEmbeddings()
48
- db = FAISS.from_documents(knowledge_base, embeddings)
 
 
 
 
 
49
 
50
  # Define search function for knowledge base
51
  def search_knowledge_base(query):
52
- return db.similarity_search(query)
 
 
 
 
53
 
54
  # SERPER API Google Search function
55
  def google_search(query):
56
- search_client = serpapi.Client(api_key=serper_api_key)
57
- results = search_client.search({"engine": "google", "q": query})
58
- return [result["snippet"] for result in results.get("organic_results", [])]
 
 
 
 
 
 
 
59
 
60
  # RAG response function
61
  def rag_response(query):
62
- retrieved_docs = search_knowledge_base(query)
63
- context = "\n".join(doc.page_content for doc in retrieved_docs)
64
- prompt = f"Context:\n{context}\n\nQuestion: {query}\nAnswer:"
65
- llm = ChatOpenAI(model="gpt-4o", temperature=0.5, api_key=openai_api_key)
66
- response = llm.invoke(prompt)
67
- return response.content
 
 
 
 
68
 
69
  # Define tools using LangChain's `tool` decorator
70
  @tool
@@ -112,37 +195,51 @@ prompt_template = ChatPromptTemplate.from_messages(
112
  ]
113
  )
114
 
115
- llm = ChatOpenAI(model="gpt-4o", temperature=0.5)
116
- llm_with_tools = llm.bind_tools(tools)
117
-
118
- agent = (
119
- {
120
- "input": lambda x: x["input"],
121
- "agent_scratchpad": lambda x: format_to_openai_tool_messages(x["intermediate_steps"]),
122
- "chat_history": lambda x: x["chat_history"],
123
- }
124
- | prompt_template
125
- | llm_with_tools
126
- | OpenAIToolsAgentOutputParser()
127
- )
128
 
129
- agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
 
131
  # Initialize chat history
132
  chat_history = []
133
 
134
  def chatbot_response(message, history):
135
- output = agent_executor.invoke({"input": message, "chat_history": chat_history})
136
- chat_history.extend(
137
- [
138
- HumanMessage(content=message),
139
- AIMessage(content=output["output"]),
140
- ]
141
- )
142
- return output["output"]
 
 
 
 
143
 
144
  # Streamlit app
145
- st.title("Chatbot")
146
 
147
  user_input = st.text_input("You:", "")
148
  if st.button("Submit"):
@@ -150,4 +247,4 @@ if st.button("Submit"):
150
  response = chatbot_response(user_input, chat_history)
151
  st.write("AI:", response)
152
  else:
153
- st.warning("Please enter a message.")
 
# Load knowledge base
# NOTE(review): the committed revision left a truncated `try:` block here
# (an `except Exception as e:` with no body) followed by an accidentally
# pasted ChatGPT conversation transcript ("You said: fix the erros ..."),
# which made the module unparsable. The transcript is removed and the stub
# is restored to a syntactically valid definition; the fully revised
# definition later in the file overrides it at import time.
def load_knowledge_base():
    """Load the knowledge-base markdown file and split it into chunks."""
    loader = TextLoader("./data_source/time_to_rethink_trust_book.md")
    documents = loader.load()
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    return text_splitter.split_documents(documents)
59
# --- Imports, logging, and configuration ------------------------------------
# Standard library
import logging
import os

# Third-party
import openai
import serpapi
import streamlit as st
from dotenv import load_dotenv
from langchain.agents import tool, AgentExecutor
from langchain.agents.format_scratchpad.openai_tools import format_to_openai_tool_messages
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_core.messages import AIMessage, HumanMessage
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter

# Initialize module-level logging once at import time.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Load environment variables from .env file
load_dotenv()

# Define and validate API keys; fail fast with a visible error if either
# is missing so later calls don't fail with opaque auth errors.
openai_api_key = os.getenv("OPENAI_API_KEY")
serper_api_key = os.getenv("SERPER_API_KEY")

if not openai_api_key or not serper_api_key:
    st.error("API keys for OpenAI and SERPER must be set in the .env file.")
    logger.error("API keys for OpenAI and SERPER must be set in the .env file.")
    st.stop()

# Initialize OpenAI client
openai.api_key = openai_api_key
95
# Load knowledge base
def load_knowledge_base():
    """Load the knowledge-base markdown file and split it into 1000-char chunks.

    Returns:
        list: Document chunks produced by CharacterTextSplitter.

    On failure the error is logged, surfaced in the UI, and the Streamlit
    script run is halted via st.stop().
    """
    try:
        loader = TextLoader("./data_source/time_to_rethink_trust_book.md")
        documents = loader.load()
        text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
        return text_splitter.split_documents(documents)
    except Exception:
        # logger.exception records the full traceback, unlike
        # logger.error(f"...{e}") which kept only the message text.
        logger.exception("Error loading knowledge base")
        st.error("Error loading knowledge base.")
        st.stop()
 
107
knowledge_base = load_knowledge_base()

# Initialize embeddings and FAISS index; any failure (bad API key, empty
# knowledge base, network error) halts the app with a visible message.
try:
    embeddings = OpenAIEmbeddings()
    db = FAISS.from_documents(knowledge_base, embeddings)
except Exception:
    logger.exception("Error initializing FAISS index")
    st.error("Error initializing FAISS index.")
    st.stop()
117
 
118
# Define search function for knowledge base
def search_knowledge_base(query):
    """Return the documents most similar to `query` from the FAISS index.

    Args:
        query: Free-text search string.

    Returns:
        list: Matching Document objects; an empty list on failure.

    BUG FIX: the previous error path returned a list containing a plain
    error string, which crashed callers (rag_response reads
    `doc.page_content` on every element). An empty list keeps the return
    type consistent.
    """
    try:
        return db.similarity_search(query)
    except Exception:
        logger.exception("Error searching knowledge base")
        return []
125
 
126
# SERPER API Google Search function
def google_search(query):
    """Run a Google search via the serpapi client and return result snippets.

    Args:
        query: Search query string.

    Returns:
        list[str]: Snippets of the organic results, or a one-element error
        message list on failure.
    """
    try:
        search_client = serpapi.Client(api_key=serper_api_key)
        results = search_client.search({"engine": "google", "q": query})
        return [result["snippet"] for result in results.get("organic_results", [])]
    except Exception:
        # BUG FIX: the previous first handler caught
        # requests.exceptions.HTTPError, but `requests` was never imported,
        # so evaluating the except clause raised a NameError instead of
        # handling the search failure.
        logger.exception("Error during Google search")
        return ["Error occurred during Google search"]
138
 
139
# RAG response function
def rag_response(query):
    """Answer `query` with GPT-4o, grounded in retrieved knowledge-base context.

    Args:
        query: The user's question.

    Returns:
        str: The model's answer, or a fixed error message on failure.
    """
    try:
        retrieved_docs = search_knowledge_base(query)
        # Defensive: tolerate entries without `.page_content` (e.g. error
        # strings from a failed retrieval) instead of raising AttributeError.
        context = "\n".join(
            getattr(doc, "page_content", str(doc)) for doc in retrieved_docs
        )
        prompt = f"Context:\n{context}\n\nQuestion: {query}\nAnswer:"
        llm = ChatOpenAI(model="gpt-4o", temperature=0.5, api_key=openai_api_key)
        response = llm.invoke(prompt)
        return response.content
    except Exception:
        logger.exception("Error generating RAG response")
        return "Error occurred during RAG response generation"
151
 
152
  # Define tools using LangChain's `tool` decorator
153
  @tool
 
195
  ]
196
  )
197
 
198
+ # Create Langchain Agent with specific model and temperature
199
+ try:
200
+ llm = ChatOpenAI(model="gpt-4o", temperature=0.5)
201
+ llm_with_tools = llm.bind_tools(tools)
202
+ except Exception as e:
203
+ logger.error(f"Error creating Langchain Agent: {e}")
204
+ st.error("Error creating Langchain Agent.")
205
+ st.stop()
 
 
 
 
 
206
 
207
+ # Define the agent pipeline to handle the conversation flow
208
+ try:
209
+ agent = (
210
+ {
211
+ "input": lambda x: x["input"],
212
+ "agent_scratchpad": lambda x: format_to_openai_tool_messages(x["intermediate_steps"]),
213
+ "chat_history": lambda x: x["chat_history"],
214
+ }
215
+ | prompt_template
216
+ | llm_with_tools
217
+ | OpenAIToolsAgentOutputParser()
218
+ )
219
+ agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
220
+ except Exception as e:
221
+ logger.error(f"Error defining agent pipeline: {e}")
222
+ st.error("Error defining agent pipeline.")
223
+ st.stop()
224
 
225
# Initialize chat history.
# NOTE(review): this module-level list does not survive Streamlit's
# per-interaction script reruns; st.session_state would be the robust
# home for it — confirm before relying on multi-turn memory.
chat_history = []

def chatbot_response(message, history):
    """Run the agent on `message` and record the exchange in chat history.

    Args:
        message: The user's input text.
        history: Unused — kept for interface compatibility; the module-level
            `chat_history` is what the agent actually receives.

    Returns:
        str: The agent's reply, or a fixed error message on failure.
    """
    try:
        output = agent_executor.invoke({"input": message, "chat_history": chat_history})
        chat_history.extend(
            [
                HumanMessage(content=message),
                AIMessage(content=output["output"]),
            ]
        )
        return output["output"]
    except Exception:
        logger.exception("Error generating chatbot response")
        return "Error occurred during response generation"
241
 
242
# Streamlit app
user_input = st.text_input("You:", "")
if st.button("Submit"):
    # NOTE(review): the `if user_input:` guard sits in an unshown diff
    # context line but is implied by the dangling `else:` — confirm against
    # the full file.
    if user_input:
        response = chatbot_response(user_input, chat_history)
        st.write("AI:", response)
    else:
        # BUG FIX: the committed line was missing its closing parenthesis,
        # which was a syntax error.
        st.warning("Please enter a message.")