Updated the app
app.py
CHANGED
@@ -10,105 +10,96 @@ from langchain_core.prompts import ChatPromptTemplate
 from langchain_openai import ChatOpenAI
 from browser_use import Agent
 
-# Load environment variables (including OPENAI_API_KEY) from .env
+# Load environment variables (including OPENAI_API_KEY) from .env if present.
 load_dotenv()
 
-#
+# -------------------------------------------------------
+# Helpers to get ChatOpenAI models using the environment key
+# -------------------------------------------------------
+def get_llm():
+    """
+    Returns a ChatOpenAI instance using the OPENAI_API_KEY from environment.
+    """
+    return ChatOpenAI(
+        temperature=0,
+        openai_api_key=os.getenv("OPENAI_API_KEY")
+    )
+
+def get_llm_browser():
+    """
+    Returns a ChatOpenAI instance (GPT-4 or your custom model)
+    using the OPENAI_API_KEY from environment.
+    """
+    return ChatOpenAI(
+        model="gpt-4o",  # Adjust the model name as needed
+        temperature=0,
+        openai_api_key=os.getenv("OPENAI_API_KEY")
+    )
+
+
+# -------------------------------------------------------
+# TypedDict for State
+# -------------------------------------------------------
 class State(TypedDict):
     query: str
     category: str
     sentiment: str
     response: str
 
-def init_llm(api_key: str):
-    os.environ["OPENAI_API_KEY"] = api_key
-    return ChatOpenAI(temperature=0)
-
-def process_query(api_key: str, query: str):
-    try:
-        llm = init_llm(api_key)
-        agent = Agent(
-            task=query,
-            llm=llm
-        )
-        result = asyncio.run(agent.run())
-        return result
-    except Exception as e:
-        return f"Error: {str(e)}"
-
-with gr.Blocks() as demo:
-    gr.Markdown("# Customer Support Agent")
-    with gr.Row():
-        api_key = gr.Textbox(
-            label="OpenAI API Key",
-            placeholder="Enter your OpenAI API key",
-            type="password"
-        )
-    with gr.Row():
-        query = gr.Textbox(
-            label="Query",
-            placeholder="Enter your query here"
-        )
-    with gr.Row():
-        submit = gr.Button("Submit")
-    with gr.Row():
-        output = gr.Textbox(label="Response")
-
-    submit.click(
-        fn=process_query,
-        inputs=[api_key, query],
-        outputs=output
-    )
-# Initialize our language models.
-# We use llm_standard for normal tasks and llm_browser for browser-based tasks.
-llm_standard = ChatOpenAI(temperature=0)
-llm_browser = ChatOpenAI(model="gpt-4o", temperature=0)
 
-#
+# -------------------------------------------------------
+# Node functions for our workflow
+# (using get_llm() or get_llm_browser() on-demand)
+# -------------------------------------------------------
 def categorize(state: State) -> State:
     prompt = ChatPromptTemplate.from_template(
         "Categorize the following customer query into one of these categories: "
         "Technical, Billing, General. Query: {query}"
     )
-    chain = prompt | llm_standard
+    chain = prompt | get_llm()
     category = chain.invoke({"query": state["query"]}).content.strip()
-
+    state["category"] = category
+    return state
 
 def analyze_sentiment(state: State) -> State:
     prompt = ChatPromptTemplate.from_template(
         "Analyze the sentiment of the following customer query. "
         "Respond with either 'Positive', 'Neutral', or 'Negative'. Query: {query}"
     )
-    chain = prompt | llm_standard
+    chain = prompt | get_llm()
     sentiment = chain.invoke({"query": state["query"]}).content.strip()
-
+    state["sentiment"] = sentiment
+    return state
 
 def handle_technical(state: State) -> State:
     prompt = ChatPromptTemplate.from_template(
         "Provide a technical support response to the following query: {query}"
     )
-    chain = prompt | llm_standard
+    chain = prompt | get_llm()
     response = chain.invoke({"query": state["query"]}).content.strip()
-
+    state["response"] = response
+    return state
 
 def handle_billing(state: State) -> State:
     prompt = ChatPromptTemplate.from_template(
         "Provide a billing support response to the following query: {query}"
    )
-    chain = prompt | llm_standard
+    chain = prompt | get_llm()
     response = chain.invoke({"query": state["query"]}).content.strip()
-
+    state["response"] = response
+    return state
 
 async def run_browser_agent(task: str) -> str:
-
-
+    """
+    Helper to run the browser-use Agent asynchronously.
+    """
+    agent = Agent(task=task, llm=get_llm_browser())
     result = await agent.run()
     return result
 
 def handle_general(state: State) -> State:
     """
     For general queries, we use the browser agent to consult online resources.
-    We call the async function with asyncio.run and then extract only the final answer.
     """
     task = (
         "You are a customer support agent that consults online sources. "
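Note: the heart of this change is visible in the hunk above. The old module-level llm_standard / llm_browser instances were constructed at import time, before a user could ever type a key into the UI; the new get_llm() / get_llm_browser() factories build the model on demand, after the Gradio callback has written the key into the environment. A minimal sketch of the difference (FakeLLM and the key value are illustrative, not from the app):

import os

class FakeLLM:
    def __init__(self):
        # Captures whatever key exists at construction time.
        self.key = os.getenv("OPENAI_API_KEY")

llm_at_import = FakeLLM()                      # module level: key is still None

os.environ["OPENAI_API_KEY"] = "key-from-ui"   # later, set by the callback

def get_llm():
    return FakeLLM()                           # factory: sees the key

print(llm_at_import.key)   # None
print(get_llm().key)       # key-from-ui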
@@ -116,7 +107,7 @@ def handle_general(state: State) -> State:
     )
     result = asyncio.run(run_browser_agent(task))
     final_text = ""
-
+
     if isinstance(result, str):
         final_text = result.strip()
     elif hasattr(result, "all_results"):
@@ -130,14 +121,18 @@ def handle_general(state: State) -> State:
         final_text = str(result).strip()
     else:
         final_text = str(result).strip()
-
-
+
+    state["response"] = final_text
+    return state
 
 def escalate(state: State) -> State:
-
+    state["response"] = "This query has been escalated to a human agent due to negative sentiment."
+    return state
 
 def route_query(state: State) -> str:
-    """
+    """
+    Determine which node to route to based on sentiment and category.
+    """
     if state["sentiment"].lower() == "negative":
         return "escalate"
     elif state["category"].lower() == "technical":
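One caveat worth flagging in this hunk: handle_general still calls asyncio.run(run_browser_agent(task)), but the graph is ultimately driven from run_customer_support, which is itself an async Gradio callback. asyncio.run raises RuntimeError if an event loop is already running in the current thread. A sketch of a guard (run_async is a hypothetical helper, not part of this commit):

import asyncio
from concurrent.futures import ThreadPoolExecutor

def run_async(coro):
    """Run a coroutine from sync code even if a loop is already running."""
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        return asyncio.run(coro)   # no loop in this thread: safe
    # A loop is running (e.g. async Gradio callback): use a worker thread.
    with ThreadPoolExecutor(max_workers=1) as pool:
        return pool.submit(asyncio.run, coro).result()

handle_general could then call result = run_async(run_browser_agent(task)) and work in both contexts.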
@@ -147,7 +142,10 @@ def route_query(state: State) -> str:
     else:
         return "handle_general"
 
-
+
+# -------------------------------------------------------
+# Create the workflow graph
+# -------------------------------------------------------
 workflow = StateGraph(State)
 workflow.add_node("categorize", categorize)
 workflow.add_node("analyze_sentiment", analyze_sentiment)
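The wiring between route_query and the four handler nodes sits in context lines this diff does not show (old lines 154-171). In LangGraph a router like this is typically attached with add_conditional_edges; assuming the unchanged code follows that pattern, it would look roughly like:

# Assumed wiring for the elided lines; the mapping keys must match the
# strings route_query returns.
workflow.add_conditional_edges(
    "analyze_sentiment",
    route_query,
    {
        "handle_technical": "handle_technical",
        "handle_billing": "handle_billing",
        "handle_general": "handle_general",
        "escalate": "escalate",
    },
)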
@@ -172,31 +170,52 @@ workflow.add_edge("handle_billing", END)
 workflow.add_edge("handle_general", END)
 workflow.add_edge("escalate", END)
 workflow.set_entry_point("categorize")
+
+# Compile the workflow into a callable function
 app = workflow.compile()
 
+
+# -------------------------------------------------------
+# Gradio callback function
+# -------------------------------------------------------
 async def run_customer_support(query: str, api_key: str = "") -> str:
+    """
+    Main function called by Gradio upon submit.
+    - If user provided an API key, set it in the environment.
+    - Then run the workflow from 'app' on the user's query.
+    - Return the final response from the workflow.
+    """
+    # If no key is provided by user and none is in environment, show error.
     if not api_key and not os.getenv("OPENAI_API_KEY"):
-        return "Error: Please provide an OpenAI API key"
-
+        return "Error: Please provide an OpenAI API key."
+
+    # Set user-provided key in environment
+    if api_key:
+        os.environ["OPENAI_API_KEY"] = api_key
+
     try:
-
-
-
-
-
-
-
-        )
-
-        return result
+        # Initialize the state
+        state = {
+            "query": query,
+            "category": "",
+            "sentiment": "",
+            "response": ""
+        }
+        final_state = app(state)
+        return final_state["response"]
     except Exception as e:
         return f"Error: {str(e)}"
 
 
-#
+# -------------------------------------------------------
+# Build the Gradio UI
+# -------------------------------------------------------
 with gr.Blocks(title="Customer Support Agent with Browser Use") as demo:
     gr.Markdown("# Customer Support Agent with Browser Use")
-    gr.Markdown(
+    gr.Markdown(
+        "This agent categorizes customer queries and uses a browser-based agent "
+        "to provide informed answers (when the query is general)."
+    )
 
     with gr.Row():
         with gr.Column():
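A detail to double-check in this hunk: workflow.compile() returns a LangChain Runnable, and Runnables are normally invoked with .invoke(...) rather than called like a plain function, so final_state = app(state) is likely to raise a TypeError at runtime. A sketch of the usual call (the query is just an example):

initial_state = {
    "query": "How do I update my billing address?",
    "category": "",
    "sentiment": "",
    "response": "",
}
final_state = app.invoke(initial_state)   # .invoke, not app(...)
print(final_state["response"])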
@@ -218,10 +237,15 @@ with gr.Blocks(title="Customer Support Agent with Browser Use") as demo:
         interactive=False
     )
 
+    # Note: The order of inputs here matches the function signature.
     submit_btn.click(
         fn=run_customer_support,
         inputs=[query_input, api_key_input],
         outputs=output_box
     )
 
+# -------------------------------------------------------
+# Launch in local or Hugging Face Spaces
+# -------------------------------------------------------
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
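For a quick check outside Spaces, the async callback can be exercised directly. This assumes an OPENAI_API_KEY in .env or the environment; the query is just an example:

import asyncio

answer = asyncio.run(run_customer_support("My invoice seems wrong", api_key=""))
print(answer)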