Wajahat698 committed on
Commit
7522fcc
·
verified ·
1 Parent(s): fef6f3e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -174
app.py CHANGED
@@ -1,48 +1,24 @@
1
- import logging
2
  import os
3
- import requests
4
- from dotenv import load_dotenv
5
- import gradio as gr
6
  import openai
7
- from langchain_openai import ChatOpenAI
8
- from langchain_community.vectorstores import FAISS
9
- from langchain_openai import OpenAIEmbeddings
10
- from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
11
- from langchain.agents import tool, AgentExecutor
12
- from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
13
- from langchain.agents.format_scratchpad.openai_tools import (
14
- format_to_openai_tool_messages,
15
- )
16
- from langchain_core.messages import AIMessage, HumanMessage
17
- from langchain_community.document_loaders import TextLoader
18
- from langchain_text_splitters import CharacterTextSplitter
19
- import serpapi
20
 
21
- # Initialize logging
22
- logging.basicConfig(level=logging.INFO)
23
  logger = logging.getLogger(__name__)
24
 
25
- # Load environment variables from .env file
26
- load_dotenv()
27
-
28
- # Define and validate API keys
29
  openai_api_key = os.getenv("OPENAI_API_KEY")
30
  serper_api_key = os.getenv("SERPER_API_KEY")
31
-
32
- if not openai_api_key or not serper_api_key:
33
- logger.error("API keys are not set properly.")
34
- raise ValueError("API keys for OpenAI and SERPER must be set in the .env file.")
35
- else:
36
- logger.info("API keys loaded successfully.")
37
-
38
- # Initialize OpenAI client
39
- try:
40
- openai.api_key = openai_api_key
41
- logger.info("OpenAI client initialized successfully.")
42
- except Exception as e:
43
- logger.error(f"Error initializing OpenAI client: {e}")
44
- raise e
45
-
46
 
47
  # Load knowledge base
48
  def load_knowledge_base():
@@ -56,7 +32,6 @@ def load_knowledge_base():
56
  logger.error(f"Error loading knowledge base: {e}")
57
  raise e
58
 
59
-
60
  knowledge_base = load_knowledge_base()
61
 
62
  # Initialize embeddings and FAISS index
@@ -67,7 +42,6 @@ except Exception as e:
67
  logger.error(f"Error initializing FAISS index: {e}")
68
  raise e
69
 
70
-
71
  # Define search function for knowledge base
72
  def search_knowledge_base(query):
73
  try:
@@ -77,17 +51,11 @@ def search_knowledge_base(query):
77
  logger.error(f"Error searching knowledge base: {e}")
78
  return ["Error occurred during knowledge base search"]
79
 
80
-
81
  # SERPER API Google Search function
82
  def google_search(query):
83
  try:
84
- search_client = serpapi.Client(api_key=serper_api_key)
85
- results = search_client.search(
86
- {
87
- "engine": "google",
88
- "q": query,
89
- }
90
- )
91
  snippets = [result["snippet"] for result in results.get("organic_results", [])]
92
  return snippets
93
  except requests.exceptions.HTTPError as http_err:
@@ -97,149 +65,47 @@ def google_search(query):
97
  logger.error(f"General Error: {e}")
98
  return ["Error occurred during Google search"]
99
 
100
-
101
  # RAG response function
102
  def rag_response(query):
103
  try:
104
  retrieved_docs = search_knowledge_base(query)
105
  context = "\n".join(doc.page_content for doc in retrieved_docs)
106
- prompt = f"Context:\n{context}\n\nQuestion: {query}\nAnswer:"
107
- llm = ChatOpenAI(model="gpt-4o", temperature=0.5, api_key=openai_api_key)
 
 
 
108
  response = llm.invoke(prompt)
109
  return response.content
110
  except Exception as e:
111
  logger.error(f"Error generating RAG response: {e}")
112
  return "Error occurred during RAG response generation"
113
 
114
-
115
- # Define tools using LangChain's `tool` decorator
116
- @tool
117
- def knowledge_base_tool(query: str):
118
- """
119
- Tool function to query the knowledge base and retrieve a response.
120
- Args:
121
- query (str): The query to search the knowledge base.
122
- Returns:
123
- str: The response retrieved from the knowledge base.
124
- """
125
- return rag_response(query)
126
-
127
-
128
- @tool
129
- def google_search_tool(query: str):
130
- """
131
- Tool function to perform a Google search using the SERPER API.
132
- Args:
133
- query (str): The query to search on Google.
134
- Returns:
135
- list: List of snippets extracted from search results.
136
- """
137
- return google_search(query)
138
-
139
-
140
- tools = [
141
- knowledge_base_tool,
142
- google_search_tool,
143
- ]
144
-
145
- # Create the prompt template
146
- prompt_message = """
147
- Act as an expert copywriter who specializes in creating compelling marketing copy using AI technologies.
148
- Engage in a friendly and informative conversation based on the knowledge base.
149
- Only proceed to create sales materials when the user explicitly requests it.
150
- Work together with the user to update the outcome of the sales material.
151
- """
152
- prompt_template = ChatPromptTemplate.from_messages(
153
- [
154
- ("system", prompt_message),
155
- MessagesPlaceholder(variable_name="chat_history"),
156
- ("user", "{input}"),
157
- MessagesPlaceholder(variable_name="agent_scratchpad"),
158
- ]
159
- )
160
-
161
- # Create Langchain Agent with specific model and temperature
162
- try:
163
- llm = ChatOpenAI(model="gpt-4o", temperature=0.5) # Set temperature to 0.5
164
- llm_with_tools = llm.bind_tools(tools)
165
- except Exception as e:
166
- logger.error(f"Error creating Langchain Agent: {e}")
167
-
168
- # Define the agent pipeline to handle the conversation flow
169
- try:
170
- agent = (
171
- {
172
- "input": lambda x: x["input"],
173
- "agent_scratchpad": lambda x: format_to_openai_tool_messages(
174
- x["intermediate_steps"]
175
- ),
176
- "chat_history": lambda x: x["chat_history"],
177
- }
178
- | prompt_template
179
- | llm_with_tools
180
- | OpenAIToolsAgentOutputParser()
181
- )
182
-
183
- # Instantiate an AgentExecutor to execute the defined agent pipeline
184
- agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
185
- except Exception as e:
186
- logger.error(f"Error defining agent pipeline: {e}")
187
-
188
  # Initialize chat history
189
- chat_history = []
190
-
191
 
192
- def chatbot_response(message, history):
 
193
  try:
194
- # Generate response using the agent executor
195
- output = agent_executor.invoke({"input": message, "chat_history": chat_history})
196
-
197
- # Save the interaction context
198
- chat_history.extend(
199
- [
200
- HumanMessage(content=message),
201
- AIMessage(content=output["output"]),
202
- ]
203
- )
204
-
205
- return output["output"]
206
  except Exception as e:
207
  logger.error(f"Error generating chatbot response: {e}")
208
  return "Error occurred during response generation"
209
 
 
 
210
 
211
- # # Define CSS for Gradio interface
212
- # CSS = """
213
- # .contain { display: flex; flex-direction: column; height: 100vh; }
214
- # #component-0 { height: 90%; }
215
- # """
216
-
217
- # # Gradio interface
218
- # with gr.Blocks(css=CSS) as demo:
219
-
220
- submit_button = gr.Button("Submit")
221
-
222
- bot = gr.Chatbot()
223
 
224
- with gr.Blocks() as demo:
225
- gr.Markdown(
226
- "<span style='font-size:20px; font-weight:bold;'>Instant Insight-2-Action</span>",
227
- visible=True,
228
- )
229
-
230
- chatbot = gr.ChatInterface(
231
- fn=chatbot_response,
232
- stop_btn=None,
233
- retry_btn=None,
234
- undo_btn=None,
235
- clear_btn=None,
236
- submit_btn=submit_button,
237
- chatbot=bot,
238
- )
239
-
240
- # Launch the Gradio app
241
- try:
242
- demo.launch(server_name="0.0.0.0")
243
- except Exception as e:
244
- logger.error(f"Error launching Gradio app: {e}")
245
- raise e
 
 
1
  import os
2
+ import streamlit as st
 
 
3
  import openai
4
+ from langchain.embeddings.openai import OpenAIEmbeddings
5
+ from langchain.vectorstores.faiss import FAISS
6
+ from langchain.document_loaders import TextLoader
7
+ from langchain.chains import LLMChain
8
+ from langchain.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
9
+ from langchain.chat_models import ChatOpenAI
10
+ from langchain.schema import HumanMessage, AIMessage
11
+ from serpapi import GoogleSearch
12
+ import logging
 
 
 
 
13
 
14
# Logging: surface errors only, through a module-level logger.
logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)

# Credentials are read from the environment (set in the Space / shell).
openai_api_key = os.getenv("OPENAI_API_KEY")
serper_api_key = os.getenv("SERPER_API_KEY")
openai.api_key = openai_api_key
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
  # Load knowledge base
24
  def load_knowledge_base():
 
32
  logger.error(f"Error loading knowledge base: {e}")
33
  raise e
34
 
 
35
  knowledge_base = load_knowledge_base()
36
 
37
  # Initialize embeddings and FAISS index
 
42
  logger.error(f"Error initializing FAISS index: {e}")
43
  raise e
44
 
 
45
  # Define search function for knowledge base
46
  def search_knowledge_base(query):
47
  try:
 
51
  logger.error(f"Error searching knowledge base: {e}")
52
  return ["Error occurred during knowledge base search"]
53
 
 
54
  # SERPER API Google Search function
55
  def google_search(query):
56
  try:
57
+ search_client = GoogleSearch({"q": query, "api_key": serper_api_key})
58
+ results = search_client.get_dict()
 
 
 
 
 
59
  snippets = [result["snippet"] for result in results.get("organic_results", [])]
60
  return snippets
61
  except requests.exceptions.HTTPError as http_err:
 
65
  logger.error(f"General Error: {e}")
66
  return ["Error occurred during Google search"]
67
 
 
68
# RAG response function
def rag_response(query):
    """Answer *query* using the FAISS knowledge base plus live Google results.

    Retrieves similar documents from the knowledge base, appends Google
    search snippets as additional context, and asks the chat model to
    answer from the combined context.

    Args:
        query (str): The user's question.

    Returns:
        str: The model's answer, or a fixed error message on failure.
    """
    try:
        retrieved_docs = search_knowledge_base(query)
        # search_knowledge_base returns Document objects on success but a
        # list of plain strings on failure — handle both instead of
        # raising AttributeError on .page_content for the fallback case.
        context = "\n".join(
            getattr(doc, "page_content", doc) for doc in retrieved_docs
        )
        google_results = google_search(query)
        combined_context = context + "\n" + "\n".join(google_results)

        prompt = f"Context:\n{combined_context}\n\nQuestion: {query}\nAnswer:"
        llm = ChatOpenAI(model="gpt-4", temperature=0.5, api_key=openai_api_key)
        response = llm.invoke(prompt)
        return response.content
    except Exception as e:
        logger.error(f"Error generating RAG response: {e}")
        return "Error occurred during RAG response generation"
83
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
# Initialize chat history — create the per-session list on first run only.
if 'chat_history' not in st.session_state:
    st.session_state['chat_history'] = []
87
 
88
# Function to handle chat responses
def chatbot_response(message):
    """Produce a reply for *message* and record the turn in session history.

    Delegates answering to rag_response(), then stores the exchange as a
    HumanMessage/AIMessage pair in st.session_state.chat_history.

    Args:
        message (str): The user's chat input.

    Returns:
        str: The generated reply, or a fixed error message on failure.
    """
    try:
        reply = rag_response(message)
        history = st.session_state.chat_history
        history.append(HumanMessage(content=message))
        history.append(AIMessage(content=reply))
        return reply
    except Exception as e:
        logger.error(f"Error generating chatbot response: {e}")
        return "Error occurred during response generation"
98
 
99
# ---- Streamlit UI setup ----
st.title("Instant Insight-2-Action")

# A new prompt triggers a response; chatbot_response() also appends both
# sides of the exchange to the session history before it is rendered below.
prompt = st.chat_input("Type your prompt here...", key="unique_chat_input_key")
if prompt:
    response = chatbot_response(prompt)

# Display chat history, oldest first.
for msg in st.session_state.chat_history:
    if isinstance(msg, HumanMessage):
        label = "You"
    elif isinstance(msg, AIMessage):
        label = "Bot"
    else:
        continue  # skip anything unexpected in the history
    st.write(f"{label}: {msg.content}")