# 12.2
# ================================================================
# FILE: chat_agent.py
# ---------------------------------------------------------------
# FoodHub Conversational Assistant (Groq-exclusive version)
# ---------------------------------------------------------------
# PURPOSE:
# - Handles all user-facing chat interactions for FoodHub.
# - Uses Groq-hosted LLaMA 4 model for short (<80 words), polite,
# and context-aware responses.
# - Detects intent (promo, refund, handoff, farewell, etc.)
# and responds accordingly.
# - Enforces data privacy and safety policies.
# ================================================================
import ast
import os
import re
import sys
import warnings

import streamlit as st
from langchain.agents import initialize_agent, Tool
from langchain.agents.agent_types import AgentType
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_groq import ChatGroq

warnings.filterwarnings("ignore", category=DeprecationWarning)
# ================================================================
# SECTION 1: LLM Initialization (High Temperature)
# ---------------------------------------------------------------
# Purpose:
# Sets up a Groq-powered Large Language Model (LLM) with a high
# temperature (0.7) for natural, conversational outputs.
# Fetches the API key securely from Streamlit secrets or environment
# variables and stops execution if missing.
# ================================================================
@st.cache_resource
def initialize_llm_high():
    """
    Initialize the Groq-based LLM with high creativity (temperature = 0.7).

    Workflow:
        1. Retrieve the Groq API key (Streamlit secrets first, then the
           environment variable).
        2. Validate the key; stop the Streamlit app with guidance if missing.
        3. Configure and return a ChatGroq instance tuned for conversational,
           natural-sounding responses.

    Returns:
        ChatGroq: Configured chat model (cached by ``@st.cache_resource`` so
        it is built once per Streamlit session, not on every rerun).
    """
    # Step 1: Prefer Streamlit secrets; fall back to the environment variable.
    # BUG FIX: the original used a bare ``except:``, which also swallows
    # SystemExit/KeyboardInterrupt; ``except Exception`` covers both the
    # missing-key (KeyError) and missing-secrets-file cases safely.
    try:
        groq_api_key = st.secrets["GROQ_API_KEY"]
    except Exception:
        groq_api_key = os.getenv("GROQ_API_KEY")

    # Step 2: Validate the key; halt execution early to avoid runtime failures.
    if not groq_api_key:
        st.error("⚠️ GROQ_API_KEY Environment Variable Not Found! Please set the environment variable.")
        st.info("Please create a `.streamlit/secrets.toml` file with:\n```\nGROQ_API_KEY = \"your-api-key-here\"\n```")
        st.stop()

    # Step 3: Build the ChatGroq client.
    llmh = ChatGroq(
        model="meta-llama/llama-4-scout-17b-16e-instruct",  # Groq-hosted LLaMA model
        temperature=0.7,          # high temperature → conversational output
        max_tokens=200,           # limit response size
        max_retries=0,            # no automatic retries
        groq_api_key=groq_api_key # secure API key injection
    )

    # Step 4: Return the instance (cached to avoid reinitialization overhead).
    return llmh
# ================================================================
# SECTION 2: Create Global LLM Instance
# ---------------------------------------------------------------
# Module-level singleton: builds (or fetches from the Streamlit
# resource cache) the high-temperature LLM used by answer_tool_func
# to phrase conversational responses. Runs once at import time.
# ================================================================
llm_high = initialize_llm_high()
# ================================================================
# SECTION 3: Escalation Detection
# ---------------------------------------------------------------
# Purpose:
# Identifies user queries that indicate unresolved issues,
# urgency, dissatisfaction, or explicit requests to speak
# with a human support representative.
# Helps route critical or frustrated customer messages
# to human agents for faster resolution.
# ================================================================
def check_escalation(user_query: str) -> str:
    """
    Classify whether a customer message should be routed to human support.

    The query is lower-cased and scanned for phrases that signal repeated
    complaints, urgency, dissatisfaction, or an explicit request for a
    human agent.

    Args:
        user_query: Raw text of the customer's message.

    Returns:
        "Escalated" if any trigger phrase occurs in the query,
        otherwise "Not Escalated".
    """
    # Trigger phrases capturing frustration, urgency, or explicit escalation intent.
    trigger_phrases = (
        "issue persists", "not resolved", "complaint", "contact human",
        "priority", "immediate", "service failure", "speak to manager",
        "support required", "help me now", "not satisfied", "request escalation",
        "critical issue", "issue unresolved", "need assistance", "escalation",
        "problem still exists", "no response", "cannot resolve", "urgent",
        "multiple times", "immediate response", "problem", "escalate",
        "still not working",
    )

    # Case-insensitive substring scan; first hit decides the outcome.
    normalized = user_query.lower()
    for phrase in trigger_phrases:
        if phrase in normalized:
            return "Escalated"  # route to human support

    return "Not Escalated"
# ================================================================
# SECTION 4: Order Cancellation Handler
# ---------------------------------------------------------------
# Purpose:
# Processes and validates customer cancellation requests
# based on the current order status.
# Ensures cancellations are not permitted for orders that
# are already delivered, canceled, or beyond the preparation stage.
# ================================================================
def handle_cancellation(user_query: str, raw_orders: str, order_status: str) -> str:
    """
    Produce a polite outcome message for an order-cancellation request.

    Args:
        user_query: Raw customer message; processed only if it mentions "cancel".
        raw_orders: Raw order details text (accepted for interface
            compatibility; not consulted by the current rules).
        order_status: Current order status string, or None/empty if unknown.

    Returns:
        "" when the query carries no cancellation intent; otherwise a
        status-appropriate explanation of why cancellation cannot proceed.
    """
    # Guard clause: no cancellation intent → nothing to handle.
    if "cancel" not in user_query.lower():
        return ""

    # Normalize the status once; unknown/None becomes the empty string so it
    # falls through to the default branch below.
    status = order_status.lower() if order_status else ""

    # Completed or already-canceled orders cannot be canceled again.
    if status in ("delivered", "canceled"):
        return (
            f"Your order has already been {status}. "
            "Cancellation is therefore not possible. We appreciate your understanding!"
        )

    # Once preparation or pickup has started, cancellation is disallowed.
    if status in ("preparing food", "picked up"):
        return (
            f"Your order is currently {status}. "
            "Unfortunately, cancellations are not permitted at this stage. Thank you for your understanding!"
        )

    # Default: cancellation not allowed for any other/unknown state.
    return (
        "Your order cannot be canceled at this moment. "
        "We appreciate your patience and look forward to serving you again!"
    )
# ================================================================
# SECTION 5: Answer Tool — Final Response Generator
# ---------------------------------------------------------------
# Purpose:
# Processes the structured output from `OrderQueryTool`,
# interprets order details, applies escalation or cancellation logic,
# and generates a natural, customer-friendly response using the LLM.
# ================================================================
# ----------------------------------------------------------------
# Function: answer_tool_func()
# Description:
# - Receives a stringified dictionary from the previous tool.
# - Parses and validates it.
# - Checks for escalation or cancellation triggers.
# - Uses the LLM to craft the final user-facing message.
# ----------------------------------------------------------------
def answer_tool_func(answertool_input: str) -> str:
    """
    Generate the final customer-facing reply from OrderQueryTool output.

    Args:
        answertool_input: Stringified Python dict with keys "cust_id",
            "orig_query", and "db_response" (raw order details text).

    Returns:
        A short, polite message: an escalation notice, a cancellation
        outcome, an LLM-crafted answer grounded in db_response, or an
        error/fallback string when parsing or generation fails.
    """
    # ------------------------------------------------------------
    # Step 1: Parse the input dictionary safely.
    # literal_eval never executes code, unlike eval(). Requires the
    # module-level ``import ast`` (missing in the original file).
    # ------------------------------------------------------------
    try:
        data = ast.literal_eval(answertool_input)
        cust_id = data.get("cust_id", "Unknown")
        user_query = data.get("orig_query", "")
        db_response = data.get("db_response", "No order details found.")
    except Exception:
        # Broad catch is deliberate: covers malformed literals (ValueError,
        # SyntaxError) and non-dict payloads (AttributeError on .get).
        return "⚠️ Error: Could not parse order data properly."

    # Fields parsed out of db_response; None when the label is absent.
    # Only order_status is consumed downstream today; the ETAs are parsed
    # for completeness (the LLM reads db_response directly).
    order_status = None
    preparing_eta = None
    delivery_time = None

    # Debug tracing (stdout lands in the Streamlit/agent logs).
    print('answer_tool_func : LEVEL-1 Done', flush=True)
    print('cust_id = ', cust_id, flush=True)
    print('orig_query = ', user_query, flush=True)
    print('db_response = ', db_response, flush=True)
    sys.stdout.flush()

    # ------------------------------------------------------------
    # Step 2: Extract labelled fields from the db_response text.
    # ------------------------------------------------------------
    for line in db_response.splitlines():
        if "Order Status" in line:
            order_status = line.split(":", 1)[1].strip()
        elif "Preparing ETA" in line:
            preparing_eta = line.split(":", 1)[1].strip()
        elif "Delivery Time" in line:
            delivery_time = line.split(":", 1)[1].strip()

    # ------------------------------------------------------------
    # Step 3: Escalation check — bypass the LLM for urgent/unresolved issues.
    # ------------------------------------------------------------
    if check_escalation(user_query) == "Escalated":
        # BUG FIX: order_status may be None when db_response lacks an
        # "Order Status" line; the original called .lower() on it and
        # crashed with AttributeError. Guard and degrade gracefully.
        status_text = order_status.lower() if order_status else "unavailable"
        return (
            f"The current status of your order is: {status_text}. " +
            "⚠️ This issue needs urgent attention. " +
            "Your request has been escalated to a human support agent who will reach out to you soon."
        )

    # ------------------------------------------------------------
    # Step 4: Cancellation check — return the rule-based message if any.
    # ------------------------------------------------------------
    cancel_response = handle_cancellation(user_query, db_response, order_status)
    if cancel_response:
        return cancel_response

    # ------------------------------------------------------------
    # Step 5: Build the system prompt for the LLM to interpret and respond.
    # ------------------------------------------------------------
    system_prompt = f"""
You are a warm and helpful customer support assistant for FoodHub.
Customer ID: {cust_id}
Below is the customer's order information retrieved from the database:
{db_response}
Sample raw_orders format:
order_id: O12493,
cust_id: C1018,
order_time: 12:35,
order_status: picked up,
payment_status: COD,
item_in_order: Steak,
preparing_eta: 12:50,
prepared_time: 12:50,
delivery_eta: 1:10,
delivery_time: None
Response Instructions:
1. Respond in a friendly, natural, and concise tone — keep replies short.
2. Use only the details from `db_response`. Do not infer or create extra info.
3. Convert database text into polite, human-readable responses.
4. When order_status = 'preparing food':
- Include both 'preparing_eta' and 'delivery_eta'.
- If 'delivery_eta' is missing or None, say: "Your order is being prepared, and the delivery ETA will be available soon."
5. When order_status = 'delivered', include 'delivery_time' in the message.
6. When order_status = 'canceled', explain politely and empathetically.
7. When order_status = 'picked up':
- Include 'delivery_eta' if available.
- If 'delivery_eta' is missing or None, say: "Your order has been picked up, and the delivery ETA will be available soon."
8. If the user query contains “Where is my order”, include the current 'order_status'.
9. If the user query includes “How many items”, count the 'item_in_order' list and reply like:
"Your order includes 3 items."
"""

    # ------------------------------------------------------------
    # Step 6: Send the query to the LLM for final phrasing.
    # ``invoke`` replaces the deprecated ``predict_messages`` API and
    # likewise returns a message object with a ``.content`` attribute.
    # ------------------------------------------------------------
    user_prompt = f"User Query: {user_query}"
    response_msg = llm_high.invoke([
        SystemMessage(content=system_prompt),
        HumanMessage(content=user_prompt)
    ])

    # ------------------------------------------------------------
    # Step 7: Clean the response; fall back to an apology if it is empty.
    # ------------------------------------------------------------
    response = response_msg.content.strip()
    if not response:
        return "Sorry, we could not extract your order details at this time. Please try again later.."
    return response
# ================================================================
# SECTION 6: LangChain Tool Wrapper
# ---------------------------------------------------------------
# Wraps the chat handler as a LangChain Tool so that it can be
# called within multi-agent workflows or pipelines.
# ================================================================
#AnswerTool = Tool(
# name="answer_tool",
# func=answer_tool_func,
# description="Format raw DB results into a brief, polite user-facing message. Enforces business rules (cancelled/completed messaging, escalation)."
#)
|