praneeth300 commited on
Commit
44da592
·
verified ·
1 Parent(s): 091f71a

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +251 -0
app.py ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import os
import sqlite3
from datetime import date

import pandas as pd
import streamlit as st
from groq import Groq
from langchain.agents import Tool, initialize_agent
from langchain.agents.agent_types import AgentType
from langchain.schema import HumanMessage
from langchain_community.agent_toolkits import create_sql_agent
from langchain_community.utilities.sql_database import SQLDatabase
from langchain_groq import ChatGroq
13
+
# --- Configuration & shared resources --------------------------------------
# Read the Groq API key from the environment.  The original used
# `userdata.get(...)`, which only exists inside Google Colab
# (`google.colab.userdata`) and raises NameError in a Streamlit deployment.
groq_api_key = os.environ.get("GROQ_API_KEY")
client = Groq(api_key=groq_api_key)

# Chat model shared by every tool and guard below.
llm = ChatGroq(
    groq_api_key=groq_api_key,
    model_name="meta-llama/llama-4-scout-17b-16e-instruct"
)

# Raw SQLite connection for parameterized pandas queries, plus a LangChain
# SQL agent for free-form lookups against the same database.
# check_same_thread=False: Streamlit reruns the script on worker threads
# other than the one that opened the connection.
connection = sqlite3.connect("kartify.db", check_same_thread=False)
kartify_db = SQLDatabase.from_uri("sqlite:///kartify.db")
sqlite_agent = create_sql_agent(llm, db=kartify_db, agent_type="openai-tools", verbose=False)
def policy_tool_func(input: str) -> str:
    """Assess return/replacement eligibility from pre-fetched order context.

    Args:
        input: Order, shipment, and product-policy context text gathered by
            the SQL agent; the model is told to discuss returns only when the
            user's query explicitly asked about them.

    Returns:
        The model's plain-text answer, whitespace-stripped.
    """
    prompt = f"""Only respond about return or replacement if the user has explicitly asked about it in their query.
Use the following context from order, shipment, and product policy data:
{input}
Your task (only if return or replacement is mentioned):
1. Check eligibility based on `actual_delivery` and product policy:
- If `return_days_allowed` is 0, clearly state the product is not eligible for return.
- If within window, mention last date allowed for return and replacement.
- If the window has passed, say so.
2. Mention return conditions (e.g., “Sealed only”).
3. If `actual_delivery` is null, return/replacement is not yet applicable.
4. If any required info is missing, say so politely that i am connecting to human support.
If the query does **not** mention return or replacement, do **not** include any information about it in your response.
Respond clearly and briefly no system or SQL steps."""
    # llm(...) (__call__) is deprecated in recent LangChain; invoke() returns
    # the same AIMessage.
    return llm.invoke([HumanMessage(content=prompt)]).content.strip()
def answer_generation_tool(input: str) -> str:
    """Draft the final customer-facing reply from order context.

    Args:
        input: Combined order context plus the customer's query, passed in by
            the agent.

    Returns:
        A short, formal, single-message reply (whitespace-stripped).
    """
    prompt = f"""You are a polite and formal customer assistant replying to a user query to customer about return, replacement, delivery, cancellation, or address change.
Use the context provided below:
{input}
Guidelines:
- Respond in a short, formal, and factual tone.
- Do **not** add extra details that were not asked in the user's query.
- Do **not** format like an email avoid greetings, sign-offs, or explanations.
- Do not offer return/replacement windows unless directly asked.
- Do not mention cancellation or refund policies unless cancellation is requested.
- Do not mention address change policy until asked.
- If the product has **not been delivered**, mention that return/replacement cannot be processed yet.
- If the product **has been delivered**, calculate based on delivery date whether return or replacement is still allowed.
- If the order has already **shipped**, reply that address changes are not possible.
- If any required info is missing, say so politely that i am connecting to human support.
- If you do not what to answer now, say so politely that i am connecting to human support.
- Never ask for any bank details
Output:
- Return only a single, relevant customer-facing message no system instructions, reasoning, or metadata.
"""
    # llm(...) (__call__) is deprecated in recent LangChain; invoke() returns
    # the same AIMessage.
    return llm.invoke([HumanMessage(content=prompt)]).content.strip()
def output_guard_check(model_output: str) -> str:
    """Classify a drafted reply as safe or not via an LLM guard.

    Args:
        model_output: The candidate assistant reply to screen.

    Returns:
        The raw model verdict string; callers compare against "BLOCK"
        (anything else is treated as safe).
    """
    prompt = f"""
You are a content safety assistant. Your task is to classify if the assistant's response is appropriate.
If the message contains:
- Requests for bank details, OTPs, account numbers
- Harassment or offensive tone
- Privacy concerns or unsafe advice
- Misunderstanding and miscommunication word
- Phrases like "please contact customer service" or redirection to a human agent
- Escalated this to our support team
Return: BLOCK
Otherwise, return: SAFE
Response: {model_output}
Output:
"""
    # predict() is deprecated in recent LangChain; invoke() on a string
    # returns an AIMessage whose .content is the same text.
    return llm.invoke(prompt).content.strip()
def evaluate_response_quality(context: str, query: str, response: str) -> dict:
    """Score an assistant reply for groundedness and precision (0.0-1.0 each).

    Asks the LLM to emit JSON scores and parses them safely.

    Args:
        context: The order context the reply should be grounded in.
        query: The customer's question.
        response: The assistant reply being evaluated.

    Returns:
        {"groundedness": float, "precision": float}; both keys are always
        present and default to 0.0 when the model output cannot be parsed,
        so callers may index them directly.
    """
    prompt = f"""Evaluate the assistant's response to a customer query using the provided order context.

Context: {context}
Customer Query: {query}
Assistant's Response: {response}

Instructions:
1. **Groundedness (0.0 to 1.0)**: Score based on how well the response is factually supported by the context.
- Score closer to 1 if all facts are accurate and derived from the context.
- Score closer to 0 if there is hallucination, guesswork, or any fabricated information.

2. **Precision (0.0 to 1.0)**: Score based on how directly and accurately the assistant addresses the query.
- Score closer to 1 if the response is concise, focused, and answers the exact user query.
- Score closer to 0 if it includes irrelevant details or misses the main point.

Output format (JSON only):

groundedness: float between 0 and 1 ,
precision: float between 0 and 1

Only return the JSON. No explanations.

"""
    # predict() is deprecated in recent LangChain releases.
    score = llm.invoke(prompt).content.strip()
    # Models often wrap JSON in a markdown code fence; strip it before parsing.
    if score.startswith("```"):
        score = score.strip("`").removeprefix("json").strip()
    try:
        # json.loads replaces the original eval(): never execute LLM output
        # as Python code.
        parsed = json.loads(score)
        return {
            "groundedness": float(parsed.get("groundedness", 0.0)),
            "precision": float(parsed.get("precision", 0.0)),
        }
    except (json.JSONDecodeError, TypeError, ValueError, AttributeError):
        # Unparseable / non-dict output -> worst-case scores so the caller's
        # quality gate triggers a retry or human handoff.
        return {"groundedness": 0.0, "precision": 0.0}
def conversation_guard_check(history) -> str:
    """Review the whole chat transcript for conversation-level problems.

    Args:
        history: List of {"user": ..., "assistant": ...} dicts in turn order.

    Returns:
        The raw model verdict string; callers compare against "BLOCK".
    """
    chat_summary = "\n".join(f"Customer: {h['user']}\nAssistant: {h['assistant']}" for h in history)
    prompt = f"""
You are a conversation monitor AI. Review the entire conversation and classify if the assistant:
- Repeatedly offered unnecessary return or replacement steps
- Gave more than what the user asked
- Missed signs of customer distress
- Ignored user's refusal of an option
If any of the above are TRUE, return BLOCK
Else, return SAFE
Conversation:
{chat_summary}
Output:
"""
    # predict() is deprecated in recent LangChain; invoke() returns an
    # AIMessage whose .content carries the same string.
    return llm.invoke(prompt).content.strip()
# Expose the two LLM helpers as LangChain tools so the agent can route a
# query between eligibility checking and final-answer drafting.
tools = [
    Tool(name="PolicyChecker", func=policy_tool_func, description="Check return and replacement eligibility."),
    Tool(name="AnswerGenerator", func=answer_generation_tool, description="Craft final response.")
]

# NOTE(review): initialize_agent is deprecated in recent LangChain releases;
# consider migrating to the LangGraph agent constructors when upgrading.
order_agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=False, handle_parsing_errors=True)
# ---------------------------------------------------------------------------
# Streamlit UI: customer lookup, order selection, and the guarded chat loop.
# ---------------------------------------------------------------------------
st.title("📦 Kartify Order Query Chatbot")

customer_id = st.text_input("Enter your Customer ID:")

# Canned handoff reply used whenever a quality or safety check fails.
HANDOFF_RESPONSE = "Your request is being forwarded to a customer support specialist. A human agent will assist you shortly."

if customer_id:
    # Parameterized query — never interpolate user input into SQL.
    query = """
        SELECT
            order_id,
            product_description
        FROM
            orders
        WHERE
            customer_id = ?
        ORDER BY order_date DESC
    """
    df = pd.read_sql_query(query, connection, params=(customer_id,))

    if not df.empty:
        # NOTE(review): assumes order_id and product_description are TEXT
        # columns — numeric order_id would make this concatenation fail.
        selected_order = st.selectbox("Select your Order:", df["order_id"] + " - " + df["product_description"])
        start_chat = st.button("Start Chat")

        if start_chat:
            # Reset chat state except customer ID and order ID.
            st.session_state.chat_history = []
            st.session_state.order_id = selected_order.split(" - ")[0]
            with st.spinner("Loading order details..."):
                order_context_raw = sqlite_agent.invoke(f"Fetch all columns for order ID {st.session_state.order_id}")
                # Use the real current date — the original hard-coded
                # "25 July", which silently broke every date-based
                # return/replacement eligibility calculation.
                today = date.today().strftime("%d %B")
                st.session_state.order_context = f"Order ID: {st.session_state.order_id}\n{order_context_raw}\nToday's Date: {today}"

        if "order_context" in st.session_state:
            st.markdown("### Chat with Assistant")

            # Replay the transcript so it survives Streamlit's reruns.
            for msg in st.session_state.chat_history:
                st.chat_message("user").write(msg["user"])
                st.chat_message("assistant").write(msg["assistant"])

            user_query = st.chat_input("How can I help you?")

            if user_query:
                # Step 0: classify intent (escalation / exit / process / off-topic).
                # (Prompt originally said "3 categories" while listing four.)
                intent_prompt = f"""You are an intent classifier for customer service queries. Your task is to classify the user's query into one of the following 4 categories based on tone, completeness, and content.
Return **only the numeric category ID (0, 1, 2 and 3)** as the output. Do not include any explanation or extra text.
### Categories:
0 **Escalation**
- The user is very angry, frustrated, or upset.
- Uses strong emotional language (e.g., “This is unacceptable”, “Worst service ever”, “I’m tired of this”, “I want a human now”).
- Requires **immediate human handoff**.
- Escalation confidence must be very high (90% or more).
1 **Exit**
- The user is ending the conversation or expressing satisfaction.
- Phrases like “Thanks”, “Got it”, “Okay”, “Resolved”, “Never mind”.
- No further action is required.
2 **Process**
- The query is clear and well-formed.
- Contains enough detail to act on (e.g., mentions order ID, issue, date).
- Language is polite or neutral; the query is actionable.
- Proceed with normal handling.
3- **Random Question**
- If user asked something not related to order
example - What is NLP
---
Your job:
Read the user query and return just the category number (0, 1, 2, or 3). Do not include explanations, formatting, or any text beyond the number.
User Query: {user_query}"""
                # predict() is deprecated; invoke() yields the same text.
                intent = llm.invoke(intent_prompt).content.strip()

                if intent == "0":
                    # Escalation: immediate human handoff (stray "1" removed
                    # from the original message text).
                    response = "Sorry for the inconvenience. A human agent will assist you shortly."
                elif intent == "1":
                    response = "Thank you! I hope I was able to help."
                elif intent == "3":
                    response = "Apologies, I’m currently only able to help with information about your placed orders. Please let me know how I can assist you with those!"
                else:
                    full_prompt = f"""
Context:
{st.session_state.order_context}
Customer Query: {user_query}
Previous response: {st.session_state.chat_history}
Use tools to reply.
"""
                    with st.spinner("Generating response..."):
                        raw_response = order_agent.run(full_prompt)

                    # Step 1: quality gate (groundedness and precision);
                    # retry once, then hand off to a human.
                    scores = evaluate_response_quality(st.session_state.order_context, user_query, raw_response)
                    # .get() guards against a malformed score dict.
                    if scores.get("groundedness", 0.0) < 0.75 or scores.get("precision", 0.0) < 0.75:
                        regenerated_response = order_agent.run(full_prompt)
                        scores_retry = evaluate_response_quality(st.session_state.order_context, user_query, regenerated_response)
                        if scores_retry.get("groundedness", 0.0) >= 0.75 and scores_retry.get("precision", 0.0) >= 0.75:
                            response = regenerated_response
                        else:
                            response = HANDOFF_RESPONSE
                    else:
                        response = raw_response

                    # Step 2: per-message safety guard (skip if already handing off).
                    if response != HANDOFF_RESPONSE:
                        if output_guard_check(response) == "BLOCK":
                            response = HANDOFF_RESPONSE

                # Save chat history.
                st.session_state.chat_history.append({"user": user_query, "assistant": response})

                # Step 3: conversation-level safety.
                if conversation_guard_check(st.session_state.chat_history) == "BLOCK":
                    response = HANDOFF_RESPONSE
                    # Keep the stored transcript consistent with what the
                    # user actually sees (the original left the blocked
                    # reply in history).
                    st.session_state.chat_history[-1]["assistant"] = response

                st.chat_message("user").write(user_query)
                st.chat_message("assistant").write(response)

else:
    st.info("Please enter a Customer ID to begin.")