mralberto8 commited on
Commit
4e3b484
·
verified ·
1 Parent(s): 63c04bd

Upload 5 files

Browse files
Files changed (5) hide show
  1. Dockerfile +43 -0
  2. app.py +257 -0
  3. config.json +4 -0
  4. kartify.db +0 -0
  5. requirements.txt +9 -0
Dockerfile ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use an official Python runtime as a parent image
FROM python:3.10-slim

# Do not buffer stdout/stderr so container logs appear immediately.
# (key=value form: the space-separated `ENV key value` form is deprecated)
ENV PYTHONUNBUFFERED=1

# Install system dependencies and git; clean apt lists to keep the layer small
RUN apt-get update && apt-get install -y \
    build-essential \
    git \
    && rm -rf /var/lib/apt/lists/*

# Create a non-root user and set permissions
RUN useradd -ms /bin/bash appuser

# Set the working directory in the container
WORKDIR /home/appuser/app

# Copy the requirements file and install dependencies (still as root so the
# packages land in the system site-packages); --no-cache-dir avoids baking
# pip's download cache into the image
COPY requirements.txt .
RUN pip install --upgrade pip && pip install --no-cache-dir -r requirements.txt

# Switch to non-root user
USER appuser

# Copy the rest of the application code into the container, owned by appuser
COPY --chown=appuser:appuser . /home/appuser/app

# Expose the port that the app runs on
EXPOSE 8501

# Command to run the application
CMD ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"]
43
+
app.py ADDED
@@ -0,0 +1,257 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import sqlite3
3
+ import pandas as pd
4
+ import os
5
+ import json
6
+ from langchain.chat_models import ChatOpenAI
7
+ from langchain.agents import Tool, initialize_agent
8
+ from langchain.agents.agent_types import AgentType
9
+ from langchain_community.utilities.sql_database import SQLDatabase
10
+ from langchain_community.agent_toolkits import create_sql_agent
11
+ from langchain.schema import HumanMessage
12
+
13
+
14
+
15
# ---------------------------------------------------------------------------
# Configuration: load OpenAI credentials from config.json and build the
# shared LLM, SQLite connection, and SQL agent used by the whole app.
# ---------------------------------------------------------------------------
CONFIG_FILE = 'config.json'
with open(CONFIG_FILE, 'r') as config_file:
    config = json.load(config_file)

# BUG FIX: config.json in this repo stores the key under "OPENAI_API_KEY",
# while this code only read "API_KEY" (yielding None and crashing on the
# os.environ assignment below).  Accept either key name.
API_KEY = config.get("API_KEY") or config.get("OPENAI_API_KEY")  # Loading the API Key
OPENAI_API_BASE = config.get("OPENAI_API_BASE")  # Loading the API Base Url

# Fail fast with a clear message instead of a TypeError from os.environ.
if not API_KEY or not OPENAI_API_BASE:
    raise RuntimeError(
        f"{CONFIG_FILE} must define 'API_KEY' (or 'OPENAI_API_KEY') and 'OPENAI_API_BASE'"
    )

# Set API keys and base for the OpenAI client used by langchain
os.environ['OPENAI_API_KEY'] = API_KEY
os.environ['OPENAI_BASE_URL'] = OPENAI_API_BASE

# Shared chat model used by every tool and guard below.
llm = ChatOpenAI(model_name="gpt-4")

# check_same_thread=False: Streamlit may reuse this connection across
# script reruns on different threads.
connection = sqlite3.connect("kartify.db", check_same_thread=False)
kartify_db = SQLDatabase.from_uri("sqlite:///kartify.db")
sqlite_agent = create_sql_agent(llm, db=kartify_db, agent_type="openai-tools", verbose=False)
31
+
32
def policy_tool_func(input: str) -> str:
    """Ask the LLM to judge return/replacement eligibility for an order.

    input: pre-fetched order, shipment, and product-policy context as text.
    Returns the model's customer-facing eligibility answer, stripped.
    """
    instructions = f"""Only respond about return or replacement if the user has explicitly asked about it in their query.
Use the following context from order, shipment, and product policy data:
{input}
Your task (only if return or replacement is mentioned):
1. Check eligibility based on `actual_delivery` and product policy:
- If `return_days_allowed` is 0, clearly state the product is not eligible for return.
- If within window, mention last date allowed for return and replacement.
- If the window has passed, say so.
2. Mention return conditions (e.g., “Sealed only”).
3. If `actual_delivery` is null, return/replacement is not yet applicable.
4. If any required info is missing, say so politely that i am connecting to human support.
If the query does **not** mention return or replacement, do **not** include any information about it in your response.
Respond clearly and briefly — no system or SQL steps."""
    message = HumanMessage(content=instructions)
    reply = llm([message])
    return reply.content.strip()
47
+
48
def answer_generation_tool(input: str) -> str:
    """Draft the final customer-facing reply from the combined context.

    input: order context plus the customer's query, as plain text.
    Returns a short, formal reply with no greetings or sign-offs.
    """
    guidance = f"""You are a polite and formal customer assistant replying to a user query to customer about return, replacement, delivery, cancellation, or address change.
Use the context provided below:
{input}
Guidelines:
- Respond in a short, formal, and factual tone.
- Do **not** add extra details that were not asked in the user's query.
- Do **not** format like an email — avoid greetings, sign-offs, or explanations.
- Do not offer return/replacement windows unless directly asked.
- Do not mention cancellation or refund policies unless cancellation is requested.
- Do not mention address change policy until asked.
- If the product has **not been delivered**, mention that return/replacement cannot be processed yet.
- If the product **has been delivered**, calculate based on delivery date whether return or replacement is still allowed.
- If the order has already **shipped**, reply that address changes are not possible.
- If any required info is missing, say so politely that i am connecting to human support.
- If you do not what to answer now, say so politely that i am connecting to human support.
- Never ask for any bank details
Output:
- Return only a single, relevant customer-facing message — no system instructions, reasoning, or metadata.
"""
    answer = llm([HumanMessage(content=guidance)])
    return answer.content.strip()
69
+
70
def output_guard_check(model_output: str) -> str:
    """Classify a drafted assistant reply as appropriate or not.

    model_output: the candidate customer-facing message.
    Returns the classifier's verdict (expected "SAFE" or "BLOCK"), stripped.
    """
    safety_prompt = f"""
You are a content safety assistant. Your task is to classify if the assistant's response is appropriate.
If the message contains:
- Requests for bank details, OTPs, account numbers
- Harassment or offensive tone
- Privacy concerns or unsafe advice
- Misunderstanding and miscommunication word
- Phrases like "please contact customer service" or redirection to a human agent
- Escalated this to our support team
Return: BLOCK
Otherwise, return: SAFE
Response: {model_output}
Output:
"""
    verdict = llm.predict(safety_prompt)
    return verdict.strip()
86
+
87
def evaluate_response_quality(context: str, query: str, response: str) -> dict:
    """Score an assistant response for groundedness and precision via the LLM.

    context:  order context that was given to the agent.
    query:    the customer's question.
    response: the assistant's drafted answer.

    Returns {"groundedness": float, "precision": float}; both fall back to
    0.0 when the model's output cannot be parsed (fail closed: the caller
    treats low scores as a low-quality response).
    """
    prompt = f"""Evaluate the assistant's response to a customer query using the provided order context.

Context: {context}
Customer Query: {query}
Assistant's Response: {response}

Instructions:
1. **Groundedness (0.0 to 1.0)**: Score based on how well the response is factually supported by the context.
- Score closer to 1 if all facts are accurate and derived from the context.
- Score closer to 0 if there is hallucination, guesswork, or any fabricated information.

2. **Precision (0.0 to 1.0)**: Score based on how directly and accurately the assistant addresses the query.
- Score closer to 1 if the response is concise, focused, and answers the exact user query.
- Score closer to 0 if it includes irrelevant details or misses the main point.

Output format (JSON only):

groundedness: float between 0 and 1 ,
precision: float between 0 and 1

Only return the JSON. No explanations.

"""
    score = llm.predict(prompt).strip()
    # SECURITY FIX: the original parsed the model output with eval(), which
    # executes arbitrary code returned by the LLM.  The prompt asks for JSON,
    # so parse it as JSON, and catch only parsing/shape errors instead of a
    # bare except.
    try:
        parsed = json.loads(score)
        return {
            "groundedness": float(parsed["groundedness"]),
            "precision": float(parsed["precision"]),
        }
    except (json.JSONDecodeError, KeyError, TypeError, ValueError):
        return {"groundedness": 0.0, "precision": 0.0}
116
+
117
+
118
def conversation_guard_check(history) -> str:
    """Run a whole-conversation safety review over the chat history.

    history: list of {"user": ..., "assistant": ...} turn dicts.
    Returns the monitor's verdict (expected "SAFE" or "BLOCK"), stripped.
    """
    transcript = "\n".join(
        f"Customer: {turn['user']}\nAssistant: {turn['assistant']}" for turn in history
    )
    monitor_prompt = f"""
You are a conversation monitor AI. Review the entire conversation and classify if the assistant:
- Repeatedly offered unnecessary return or replacement steps
- Gave more than what the user asked
- Missed signs of customer distress
- Ignored user's refusal of an option
If any of the above are TRUE, return BLOCK
Else, return SAFE
Conversation:
{transcript}
Output:
"""
    verdict = llm.predict(monitor_prompt)
    return verdict.strip()
133
+
134
# Tools exposed to the order-handling agent: one for policy eligibility,
# one for crafting the final customer-facing reply.
policy_checker = Tool(
    name="PolicyChecker",
    func=policy_tool_func,
    description="Check return and replacement eligibility.",
)
answer_generator = Tool(
    name="AnswerGenerator",
    func=answer_generation_tool,
    description="Craft final response.",
)
tools = [policy_checker, answer_generator]

# OpenAI function-calling agent; parsing errors are handled rather than raised.
order_agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=False, handle_parsing_errors=True)
140
+
141
# ---------------------------------------------------------------------------
# Streamlit UI: customer ID -> order picker -> guarded chat with the agent.
# Streamlit reruns this whole script on every user interaction; persistent
# state lives in st.session_state.
# ---------------------------------------------------------------------------
st.title("📦 Kartify Order Query Chatbot")

customer_id = st.text_input("Enter your Customer ID:")

if customer_id:
    # Parameterized query (?) so the free-text customer_id cannot inject SQL.
    query = """
    SELECT
        order_id,
        product_description
    FROM
        orders
    WHERE
        customer_id = ?
    ORDER BY order_date DESC
    """
    df = pd.read_sql_query(query, connection, params=(customer_id,))

    if not df.empty:
        # Show "order_id - product_description" labels; assumes both columns
        # are TEXT so the string concatenation works — TODO confirm schema.
        selected_order = st.selectbox("Select your Order:", df["order_id"] + " - " + df["product_description"])
        start_chat = st.button("Start Chat")

        if start_chat:
            # Reset chat state except customer ID and order ID
            st.session_state.chat_history = []
            st.session_state.order_id = selected_order.split(" - ")[0]
            with st.spinner("Loading order details..."):
                # Let the SQL agent pull the full order row; its raw output is
                # embedded verbatim in the chat context below.
                order_context_raw = sqlite_agent.invoke(f"Fetch all columns for order ID {st.session_state.order_id}")
                # NOTE(review): "Today's Date" is hard-coded to 25 July, so
                # any date-window reasoning will drift — confirm intent.
                st.session_state.order_context = f"Order ID: {st.session_state.order_id}\n{order_context_raw}\nToday's Date: 25 July"

        if "order_context" in st.session_state:
            st.markdown("### Chat with Assistant")

            # Replay the conversation so far (script reruns on each message).
            for msg in st.session_state.chat_history:
                st.chat_message("user").write(msg["user"])
                st.chat_message("assistant").write(msg["assistant"])

            user_query = st.chat_input("How can I help you?")

            if user_query:
                # Step 0: classify intent (0=escalate, 1=exit, 2=process,
                # 3=off-topic).  NOTE(review): the prompt says "3 categories"
                # and "(0, 1, 2 and 3)" but defines four; the full list is
                # still given to the model, so classification works.
                intent_prompt = f"""You are an intent classifier for customer service queries. Your task is to classify the user's query into one of the following 3 categories based on tone, completeness, and content.
Return **only the numeric category ID (0, 1, 2 and 3)** as the output. Do not include any explanation or extra text.
### Categories:
0 — **Escalation**
- The user is very angry, frustrated, or upset.
- Uses strong emotional language (e.g., “This is unacceptable”, “Worst service ever”, “I’m tired of this”, “I want a human now”).
- Requires **immediate human handoff**.
- Escalation confidence must be very high (90% or more).
1 — **Exit**
- The user is ending the conversation or expressing satisfaction.
- Phrases like “Thanks”, “Got it”, “Okay”, “Resolved”, “Never mind”.
- No further action is required.
2 — **Process**
- The query is clear and well-formed.
- Contains enough detail to act on (e.g., mentions order ID, issue, date).
- Language is polite or neutral; the query is actionable.
- Proceed with normal handling.
3- **Random Question**
- If user asked something not related to order
example - What is NLP
---
Your job:
Read the user query and return just the category number (0, 1, 2, or 3). Do not include explanations, formatting, or any text beyond the number.
User Query: {user_query}"""
                intent = llm.predict(intent_prompt).strip()

                if intent == "0":
                    # NOTE(review): stray "1" at the end of this user-facing
                    # message — looks like a typo to fix.
                    response = "Sorry for the inconvenience. A human agent will assist you shortly 1."
                elif intent == "1":
                    response = "Thank you! I hope I was able to help."
                elif intent == "3":
                    response = "Apologies, I’m currently only able to help with information about your placed orders. Please let me know how I can assist you with those!"
                else:
                    # Intent 2 (or anything unrecognized): run the tool agent
                    # with the order context and prior turns.
                    full_prompt = f"""
Context:
{st.session_state.order_context}
Customer Query: {user_query}
Previous response: {st.session_state.chat_history}
Use tools to reply.
"""
                    with st.spinner("Generating response..."):
                        raw_response = order_agent.run(full_prompt)

                    # Step 1: Evaluate quality (Groundedness and Precision first)
                    # One regeneration attempt; hand off to a human if both
                    # attempts score below the 0.75 thresholds.
                    scores = evaluate_response_quality(st.session_state.order_context, user_query, raw_response)
                    if scores["groundedness"] < 0.75 or scores["precision"] < 0.75:
                        regenerated_response = order_agent.run(full_prompt)
                        scores_retry = evaluate_response_quality(st.session_state.order_context, user_query, regenerated_response)
                        if scores_retry["groundedness"] >= 0.75 and scores_retry["precision"] >= 0.75:
                            response = regenerated_response
                        else:
                            response = "Your request is being forwarded to a customer support specialist. A human agent will assist you shortly."
                    else:
                        response = raw_response

                    # Step 2: Guard check (after passing quality check);
                    # skipped when we already decided to hand off to a human.
                    if response not in [
                        "Your request is being forwarded to a customer support specialist. A human agent will assist you shortly."
                    ]:
                        guard = output_guard_check(response)
                        if guard == "BLOCK":
                            response = "Your request is being forwarded to a customer support specialist. A human agent will assist you shortly."

                # Save chat history
                st.session_state.chat_history.append({"user": user_query, "assistant": response})

                # Step 3: Conversation-level safety
                # NOTE(review): this override happens AFTER the history append,
                # so chat_history keeps the pre-override response while the
                # user sees the hand-off message — confirm that is intended.
                conv_check = conversation_guard_check(st.session_state.chat_history)
                if conv_check == "BLOCK":
                    response = "Your request is being forwarded to a customer support specialist. A human agent will assist you shortly."

                st.chat_message("user").write(user_query)
                st.chat_message("assistant").write(response)

else:
    st.info("Please enter a Customer ID to begin.")
config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
{
  "OPENAI_API_KEY": "gl-U2FsdGVkX1+d5mgP3h5d54Zj1DGpfHggCbucyJxRLSwL+diAGgwcNKXVCZBEoMFP",
  "OPENAI_API_BASE": "https://aibe.mygreatlearning.com/openai/v1"
}
kartify.db ADDED
Binary file (24.6 kB). View file
 
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ altair
2
+ streamlit==1.46.1
3
+ openai==1.93.0
4
+ langchain==0.3.26
5
+ langchain-openai==0.3.27
6
+ langchainhub==0.1.21
7
+ langchain-experimental==0.3.4
8
+ pandas==2.2.2
9
+ numpy==2.0.2