Wajahat698 committed on
Commit
95422b8
·
verified ·
1 Parent(s): 7522fcc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +121 -45
app.py CHANGED
@@ -1,26 +1,41 @@
 
1
  import os
2
  import streamlit as st
 
3
  import openai
4
- from langchain.embeddings.openai import OpenAIEmbeddings
5
- from langchain.vectorstores.faiss import FAISS
6
- from langchain.document_loaders import TextLoader
7
- from langchain.chains import LLMChain
8
- from langchain.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
9
- from langchain.chat_models import ChatOpenAI
10
- from langchain.schema import HumanMessage, AIMessage
11
- from serpapi import GoogleSearch
12
- import logging
 
 
 
13
 
14
- # Configure logging
15
- logging.basicConfig(level=logging.ERROR)
16
  logger = logging.getLogger(__name__)
17
 
18
  # Load environment variables
 
 
 
19
  openai_api_key = os.getenv("OPENAI_API_KEY")
20
  serper_api_key = os.getenv("SERPER_API_KEY")
 
 
 
 
 
 
21
  openai.api_key = openai_api_key
22
 
23
  # Load knowledge base
 
24
  def load_knowledge_base():
25
  try:
26
  loader = TextLoader("./data_source/time_to_rethink_trust_book.md")
@@ -35,12 +50,8 @@ def load_knowledge_base():
35
  knowledge_base = load_knowledge_base()
36
 
37
  # Initialize embeddings and FAISS index
38
- try:
39
- embeddings = OpenAIEmbeddings()
40
- db = FAISS.from_documents(knowledge_base, embeddings)
41
- except Exception as e:
42
- logger.error(f"Error initializing FAISS index: {e}")
43
- raise e
44
 
45
  # Define search function for knowledge base
46
  def search_knowledge_base(query):
@@ -54,8 +65,11 @@ def search_knowledge_base(query):
54
  # SERPER API Google Search function
55
  def google_search(query):
56
  try:
57
- search_client = GoogleSearch({"q": query, "api_key": serper_api_key})
58
- results = search_client.get_dict()
 
 
 
59
  snippets = [result["snippet"] for result in results.get("organic_results", [])]
60
  return snippets
61
  except requests.exceptions.HTTPError as http_err:
@@ -70,42 +84,104 @@ def rag_response(query):
70
  try:
71
  retrieved_docs = search_knowledge_base(query)
72
  context = "\n".join(doc.page_content for doc in retrieved_docs)
73
- google_results = google_search(query)
74
- combined_context = context + "\n" + "\n".join(google_results)
75
-
76
- prompt = f"Context:\n{combined_context}\n\nQuestion: {query}\nAnswer:"
77
- llm = ChatOpenAI(model="gpt-4", temperature=0.5, api_key=openai_api_key)
78
  response = llm.invoke(prompt)
79
  return response.content
80
  except Exception as e:
81
  logger.error(f"Error generating RAG response: {e}")
82
  return "Error occurred during RAG response generation"
83
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
  # Initialize chat history
85
  if 'chat_history' not in st.session_state:
86
  st.session_state.chat_history = []
87
 
88
- # Function to handle chat responses
89
- def chatbot_response(message):
90
- try:
91
- response = rag_response(message)
92
- st.session_state.chat_history.append(HumanMessage(content=message))
93
- st.session_state.chat_history.append(AIMessage(content=response))
94
- return response
95
- except Exception as e:
96
- logger.error(f"Error generating chatbot response: {e}")
97
- return "Error occurred during response generation"
98
 
99
- # Streamlit UI setup
100
- st.title("Instant Insight-2-Action")
 
 
 
 
 
 
101
 
102
- prompt = st.chat_input("Type your prompt here...", key="unique_chat_input_key")
103
- if prompt:
104
- response = chatbot_response(prompt)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
 
106
- # Display chat history
107
- for msg in st.session_state.chat_history:
108
- if isinstance(msg, HumanMessage):
109
- st.write(f"You: {msg.content}")
110
- elif isinstance(msg, AIMessage):
111
- st.write(f"Bot: {msg.content}")
 
1
+ import logging
2
  import os
3
  import streamlit as st
4
+ from dotenv import load_dotenv
5
  import openai
6
+ from langchain_openai import ChatOpenAI
7
+ from langchain_community.vectorstores import FAISS
8
+ from langchain_openai import OpenAIEmbeddings
9
+ from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
10
+ from langchain.agents import tool, AgentExecutor
11
+ from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
12
+ from langchain.agents.format_scratchpad.openai_tools import format_to_openai_tool_messages
13
+ from langchain_core.messages import AIMessage, HumanMessage
14
+ from langchain_community.document_loaders import TextLoader
15
+ from langchain_text_splitters import CharacterTextSplitter
16
+ import serpapi
17
+ import requests
18
 
19
+ # Initialize logging
20
+ logging.basicConfig(level=logging.INFO)
21
  logger = logging.getLogger(__name__)
22
 
23
  # Load environment variables
24
+ load_dotenv()
25
+
26
+ # Define and validate API keys
27
  openai_api_key = os.getenv("OPENAI_API_KEY")
28
  serper_api_key = os.getenv("SERPER_API_KEY")
29
+
30
+ if not openai_api_key or not serper_api_key:
31
+ logger.error("API keys are not set properly.")
32
+ raise ValueError("API keys for OpenAI and SERPER must be set in the .env file.")
33
+
34
+ # Initialize OpenAI client
35
  openai.api_key = openai_api_key
36
 
37
  # Load knowledge base
38
+ @st.cache_resource
39
  def load_knowledge_base():
40
  try:
41
  loader = TextLoader("./data_source/time_to_rethink_trust_book.md")
 
50
# Build the retrieval index once at import time. `knowledge_base` holds the
# documents produced by load_knowledge_base(); `db` is the FAISS index that
# search_knowledge_base() queries below.
knowledge_base = load_knowledge_base()

# Initialize embeddings and FAISS index
# OpenAIEmbeddings reads OPENAI_API_KEY from the environment (validated above).
embeddings = OpenAIEmbeddings()
# NOTE(review): this embeds the whole knowledge base on every cold start —
# presumably acceptable for a small corpus; confirm before scaling.
db = FAISS.from_documents(knowledge_base, embeddings)
 
 
 
 
55
 
56
  # Define search function for knowledge base
57
  def search_knowledge_base(query):
 
65
  # SERPER API Google Search function
66
  def google_search(query):
67
  try:
68
+ search_client = serpapi.Client(api_key=serper_api_key)
69
+ results = search_client.search({
70
+ "engine": "google",
71
+ "q": query,
72
+ })
73
  snippets = [result["snippet"] for result in results.get("organic_results", [])]
74
  return snippets
75
  except requests.exceptions.HTTPError as http_err:
 
84
  try:
85
  retrieved_docs = search_knowledge_base(query)
86
  context = "\n".join(doc.page_content for doc in retrieved_docs)
87
+ prompt = f"Context:\n{context}\n\nQuestion: {query}\nAnswer:"
88
+ llm = ChatOpenAI(model="gpt-4o", temperature=0.5, api_key=openai_api_key)
 
 
 
89
  response = llm.invoke(prompt)
90
  return response.content
91
  except Exception as e:
92
  logger.error(f"Error generating RAG response: {e}")
93
  return "Error occurred during RAG response generation"
94
 
95
# Define tools
@tool
def knowledge_base_tool(query: str):
    """Query the knowledge base and retrieve a response."""
    # NOTE: the docstring above doubles as the tool description the agent's
    # LLM sees when deciding which tool to call — it is runtime behavior,
    # not documentation only. Delegates to the RAG pipeline defined above.
    return rag_response(query)
100
+
101
@tool
def google_search_tool(query: str):
    """Perform a Google search using the SERPER API."""
    # NOTE: the docstring above is the tool description exposed to the agent's
    # LLM; changing it changes tool-selection behavior. Thin wrapper over the
    # google_search() helper defined earlier in this module.
    return google_search(query)
105
+
106
# Expose both retrieval paths to the agent.
tools = [knowledge_base_tool, google_search_tool]

# System instructions sent to the model on every turn.
prompt_message = """
Act as an expert copywriter who specializes in creating compelling marketing copy using AI technologies.
Engage in a friendly and informative conversation based on the knowledge base.
Only proceed to create sales materials when the user explicitly requests it.
Work together with the user to update the outcome of the sales material.
"""

# Chat prompt layout: system rules, prior turns, the new user input, and the
# agent's tool-call scratchpad appended last.
prompt_template = ChatPromptTemplate.from_messages(
    [
        ("system", prompt_message),
        MessagesPlaceholder(variable_name="chat_history"),
        ("user", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)

# Bind the tool schemas to the model so it can emit tool calls.
llm = ChatOpenAI(model="gpt-4o", temperature=0.5)
llm_with_tools = llm.bind_tools(tools)

# LCEL agent pipeline: project the executor's state dict onto the prompt
# variables, run the tool-aware model, then parse its output into actions.
agent = (
    {
        "input": lambda state: state["input"],
        "agent_scratchpad": lambda state: format_to_openai_tool_messages(
            state["intermediate_steps"]
        ),
        "chat_history": lambda state: state["chat_history"],
    }
    | prompt_template
    | llm_with_tools
    | OpenAIToolsAgentOutputParser()
)

# Executor that loops model <-> tools until a final answer is produced;
# verbose=True logs each intermediate step to stdout.
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
141
+
142
# Streamlit page header.
st.title("AI Copywriting Assistant")

# Chat history must survive Streamlit reruns, so keep it in session state.
if "chat_history" not in st.session_state:
    st.session_state["chat_history"] = []
148
 
149
# Replay the conversation so far; entries are {"role": ..., "content": ...}.
for entry in st.session_state.chat_history:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])
 
 
 
 
 
 
153
 
154
# Chat input
if prompt := st.chat_input("Type your message here..."):
    # Record and echo the user's message.
    st.session_state.chat_history.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Generate AI response
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""

        try:
            # Run the tool-using agent on the new prompt plus prior turns.
            # NOTE(review): chat_history entries are plain role/content dicts —
            # confirm MessagesPlaceholder accepts that shape in this
            # LangChain version.
            output = agent_executor.invoke({
                "input": prompt,
                "chat_history": st.session_state.chat_history,
            })
            full_response = output["output"]

            # Simulated streaming: reveal the answer word by word. Accumulate
            # into a separate buffer — the original appended each word back
            # onto `full_response` itself, which duplicated the whole answer
            # in both the display and the saved history.
            shown = ""
            for word in full_response.split():
                shown += word + " "
                message_placeholder.markdown(shown + "▌")
            message_placeholder.markdown(full_response)
        except Exception as e:
            # logger.exception records the traceback, not just the message.
            logger.exception("Error generating response: %s", e)
            full_response = "I apologize, but an error occurred while generating the response. Please try again."
            message_placeholder.markdown(full_response)

    # Persist the assistant's reply (or the error notice) for future turns.
    st.session_state.chat_history.append({"role": "assistant", "content": full_response})