Baolina committed on
Commit
a4b22dc
·
verified ·
1 Parent(s): 24e11e6

Upload 3 files

Browse files
Files changed (3) hide show
  1. Dockerfile +23 -0
  2. app.py +255 -0
  3. requirements.txt +4 -0
Dockerfile ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
# Container image for the Streamlit support-ticket categorization app.
FROM python:3.9-slim

WORKDIR /app

# System tooling; curl is required by the HEALTHCHECK below.
RUN apt-get update && apt-get install -y \
    build-essential \
    curl \
    software-properties-common \
    git \
    && rm -rf /var/lib/apt/lists/*

COPY requirements.txt ./
COPY app.py ./
# NOTE(review): this commit only adds requirements.txt, app.py and the
# Dockerfile — confirm a src/ directory exists in the build context,
# otherwise this COPY aborts the build.
COPY src/ ./src/

# --no-cache-dir: pip's wheel cache is dead weight inside an image layer.
RUN pip3 install --no-cache-dir -r requirements.txt

EXPOSE 8501

# Streamlit's built-in health endpoint; a non-2xx marks the container unhealthy.
HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health

ENTRYPOINT ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"]
app.py ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import streamlit as st
import pandas as pd
import json
import os
from openai import OpenAI

# Load OpenAI credentials from the environment.
# os.environ.get() never raises, so the original try/except around it was
# dead: a missing key surfaced only as an opaque constructor error. Check
# for the key explicitly so the user gets an actionable message.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
OPENAI_API_BASE = os.environ.get("OPENAI_API_BASE")  # optional custom endpoint

if not OPENAI_API_KEY:
    st.error("OPENAI_API_KEY is not set; configure it in the environment.")
    st.stop()

try:
    # base_url=None falls back to the official OpenAI endpoint.
    openai_client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)
except Exception as e:  # e.g. malformed base URL
    st.error(f"Error loading OpenAI credentials: {e}")
    st.stop()
16
+
17
+
18
+ # Define the functions for categorization, metadata extraction, priority prediction, and response generation
19
def query_openai(prompt, query):
    """Send a system prompt plus a user query to the chat model.

    Args:
        prompt (str): System instructions for the model.
        query (str): User message to be answered.

    Returns:
        str: Text content of the model's first choice.
    """
    chat = openai_client.chat.completions.create(
        model="gpt-3.5-turbo",  # or another suitable OpenAI model
        messages=[
            {"role": "system", "content": prompt},
            {"role": "user", "content": query},
        ],
        max_tokens=100,  # adjust as needed
    )
    return chat.choices[0].message.content
38
+
39
def classify_ticket(prompt, query):
    """Classify a support ticket with the model, expecting a JSON reply.

    Args:
        prompt (str): Classification instructions for the model.
        query (str): Support ticket text to classify.

    Returns:
        dict | None: Parsed classification result, or None on failure.
    """
    response_text = None
    try:
        response_text = query_openai(prompt, query)
        # The model is instructed to answer with bare JSON.
        return json.loads(response_text)
    except json.JSONDecodeError as e:
        st.error(f"Error decoding JSON from OpenAI response: {e}")
        st.text(f"Raw OpenAI response: {response_text}")
        return None
    except Exception as e:
        st.error(f"An unexpected error occurred during classification: {e}")
        return None
60
+
61
def extract_metadata(prompt, query):
    """Extract structured metadata from a ticket, expecting a JSON reply.

    Args:
        prompt (str): Metadata-extraction instructions for the model.
        query (str): Support ticket text to analyze.

    Returns:
        dict | None: Parsed metadata (Device, Problem Type, User Impact),
            or None on failure.
    """
    response_text = None
    try:
        response_text = query_openai(prompt, query)
        # The model is instructed to answer with bare JSON.
        return json.loads(response_text)
    except json.JSONDecodeError as e:
        st.error(f"Error decoding JSON from OpenAI response: {e}")
        st.text(f"Raw OpenAI response: {response_text}")
        return None
    except Exception as e:
        st.error(f"An unexpected error occurred during metadata extraction: {e}")
        return None
82
+
83
def predict_priority(prompt, query, problem_type, user_impact):
    """Predict a ticket's priority with the model, expecting a JSON reply.

    Args:
        prompt (str): Priority-prediction instructions for the model.
        query (str): Support ticket text.
        problem_type (str): Problem type extracted earlier.
        user_impact (str): User impact extracted earlier.

    Returns:
        dict | None: Parsed priority result, or None on failure.
    """
    response_text = None
    try:
        # Fold the extracted metadata into the user message so the model can
        # weigh it alongside the raw ticket text.
        full_query = f"""
        Support Ticket: {query}
        Problem Type: {problem_type}
        User Impact: {user_impact}
        Based on the support ticket, problem type, and user impact, predict the priority: Low, Medium, High, or Urgent.
        Return only a structured JSON output in the following format:
        {{"priority": "priority_prediction"}}
        """
        response_text = query_openai(prompt, full_query)
        return json.loads(response_text)
    except json.JSONDecodeError as e:
        st.error(f"Error decoding JSON from OpenAI response: {e}")
        st.text(f"Raw OpenAI response: {response_text}")
        return None
    except Exception as e:
        st.error(f"An unexpected error occurred during priority prediction: {e}")
        return None
114
+
115
def generate_response(response_prompt, query, category, metadata_tags, priority):
    """Draft a customer-facing reply for a ticket via the model.

    Args:
        response_prompt (str): Instructions for composing the reply.
        query (str): Original support ticket text.
        category (str): Predicted ticket category.
        metadata_tags (dict): Extracted tags (Device, Problem Type, User Impact).
        priority (str): Predicted ticket priority.

    Returns:
        str | None: Generated reply text, or None on failure.
    """
    # Bundle all derived context into a single user message.
    user_message = f"""
    Support Ticket: {query}
    Category: {category}
    Metadata Tags: {metadata_tags}
    Priority: {priority}
    """

    try:
        return query_openai(response_prompt, user_message)
    except Exception as e:
        st.error(f"An unexpected error occurred during response generation: {e}")
        return None
142
+
143
# Define the prompts.

# Ticket classification: constrains the model to three fixed categories and
# a single-key JSON reply.
classification_prompt = """
You are a technical assistant. Classify the support ticket based on the Support Ticket Text presented in the input into the following categories and not any other.
- Technical issues
- Hardware issues
- Data recovery
Return only a structured JSON output in the following format:
{"Category": "category_prediction"}
"""

# Metadata extraction with few-shot examples.
# FIX: this was declared as an f-string with zero placeholders, forcing every
# brace to be doubled ({{...}}). A plain string with single braces yields the
# identical text and removes the latent hazard of accidental interpolation.
metadata_prompt = """
You are an intelligent assistant that extracts structured metadata from technical support queries.
Analyze the query and extract the following information:

* Device (e.g., Laptop, Phone, Router, etc.)
* Problem Type (e.g., Not Turning On, Lost Internet, Deleted Files)
* User Impact - Estimate based on how severely the issue affects the user's ability to continue working or using the device:

- * Major: The user cannot proceed with work at all.
- * Moderate: The user is impacted but may have a workaround.
- * Minor: The issue is present but does not significantly hinder usage.

Use the following examples as guidance.

Query Text: My phone battery is draining rapidly even on battery saver mode. I barely use it and it drops 50% in a few hours.
Output: {"Device": "Phone", "Problem Type": "Battery Draining", "User Impact": "Minor"}

Query Text: I accidentally deleted a folder containing all project files. Please help me recover it.
Output: {"Device": "Laptop", "Problem Type": "Deleted Files", "User Impact": "Major"}

Query Text: My router is not working.
Output: {"Device": "Router", "Problem Type": "Lost Internet", "User Impact": "Moderate"}

Return the final output only in a valid JSON format without any additional explanation.
"""

# Priority prediction via step-by-step evaluation.
# NOTE(review): this prompt allows only Low/Medium/High, while the inline
# query built in predict_priority also offers "Urgent" — align the two.
priority_prompt = """
You are an intelligent assistant that determines the priority level of a support ticket.
For any given ticket, follow this step-by-step reasoning process to assign the correct priority level: Low, Medium, High.
Step-by-step Evaluation:
Is the device or service completely unusable?
Is the issue blocking critical or time-sensitive work?
Is there a specific deadline or urgency mentioned by the user?
Does the user mention partial functionality or ongoing work?
Is the tone or language expressing frustration or emergency?
After evaluating each step, decide the most appropriate priority level based on the impact and urgency.
Finally, return only the structured output in valid JSON format, like this:
{"priority": "High"}
Do not include your reasoning in the output — just the JSON.
"""

# Customer-reply drafting: short, empathetic, with an ETA derived from priority.
response_prompt = """
You are provided with a support ticket's text along with its Category, Tags, and assigned Priority level.

Follow these steps before generating your final response:

1. Analyze the ticket text to understand the customer's sentiment and main concern.
2. Identify the issue type using the provided Category and Tags.
3. Determine the appropriate ETA based on the Priority level.
4. Compose a short, empathetic response that reassures the customer, acknowledges their concern, and includes the ETA.

Ensure the final response:

1. Is under 50 words
2. Has a polite and empathetic tone
3. Addresses the issue clearly

Return only the final response to the customer. Do not include your reasoning steps in the output.
"""
212
+
213
+
214
# Streamlit App: pipeline = classify -> extract metadata -> predict priority
# -> draft a customer reply, rendering each stage as it completes.
st.title("Support Ticket Categorization System")

st.write("Enter the support ticket text below:")

support_ticket_input = st.text_area("Support Ticket Text", height=200)

if st.button("Process Ticket"):
    if support_ticket_input:
        st.write("Processing...")

        # Categorization
        category_result = classify_ticket(classification_prompt, support_ticket_input)
        category = category_result.get('Category') if category_result else "N/A"
        st.subheader("Category:")
        st.write(category)

        # Metadata Extraction
        metadata_result = extract_metadata(metadata_prompt, support_ticket_input)
        device = metadata_result.get('Device') if metadata_result else "N/A"
        problem_type = metadata_result.get('Problem Type') if metadata_result else "N/A"
        user_impact = metadata_result.get('User Impact') if metadata_result else "N/A"

        st.subheader("Metadata:")
        st.write(f"Device: {device}")
        st.write(f"Problem Type: {problem_type}")
        st.write(f"User Impact: {user_impact}")

        # Priority Prediction
        priority_result = predict_priority(priority_prompt, support_ticket_input, problem_type, user_impact)
        priority = priority_result.get('priority') if priority_result else "N/A"
        st.subheader("Priority:")
        st.write(priority)

        # Draft Response Generation.
        # FIX: previously the raw metadata_result was forwarded, so a failed
        # extraction showed the model a literal "None"; fall back to the
        # per-field "N/A" values computed above instead.
        metadata_tags = metadata_result if metadata_result else {
            "Device": device,
            "Problem Type": problem_type,
            "User Impact": user_impact,
        }
        draft_response = generate_response(response_prompt, support_ticket_input, category, metadata_tags, priority)
        st.subheader("Draft Response:")
        st.write(draft_response)
    else:
        st.warning("Please enter support ticket text to process.")
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ altair==5.5.0
2
+ pandas==2.2.2
3
+ streamlit==1.47.1
4
+ openai==1.97.1