tanish78 committed on
Commit
9ae5162
·
verified ·
1 Parent(s): 7612df7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -469
app.py CHANGED
@@ -10,463 +10,6 @@ import matplotlib.pyplot as plt
10
  import plotly.express as px
11
  from PIL import Image
12
 
13
-
14
- categories_keywords = {
15
- "Application Status": ["application status", "application", "status", "submitted", "processing", "pending", "approval", "rejected", "accepted"],
16
- "Volunteering": ["volunteer", "volunteering", "help out", "assist", "volunteer work", "volunteer opportunities"],
17
- "Certificates": ["certificate", "certificates", "completion", "certification", "accreditation", "proof", "document", "certified"],
18
- "Job Opportunities": ["job", "opportunity", "career", "vacancy", "position", "employment", "hiring", "recruitment", "internship"],
19
- "Surveys and Forms": ["survey", "form", "forms", "questionnaire", "feedback form", "response", "fill out", "submission"],
20
- "General Queries": ["hello", "hi", "help", "general", "query", "question", "info", "information", "inquiry", "ask"],
21
- "Spam": ["spam", "subscribe", "remove", "stop", "junk", "block", "opt-out", "watch my video", "click on this link", "win prize"],
22
- "Rescheduling and Postponing": ["reschedule", "postpone", "delay", "change date", "new time", "rearrange", "shift", "adjust timing"],
23
- "Contact and Communication Issues": ["contact", "communicate", "communication", "reach out", "phone", "email", "address", "details"],
24
- "Email and Credentials Issues": ["email", "credentials", "login", "password", "access", "username", "account", "verification", "reset"],
25
- "Timing and Scheduling": ["timing", "schedule", "scheduling", "time", "appointment", "availability", "calendar", "book", "slot"],
26
- "Salary and Benefits": ["salary", "benefits", "pay", "compensation", "wages", "earnings", "package", "remuneration", "incentives"],
27
- "Technical Issues": ["technical", "issue", "problem", "error", "bug", "glitch", "fix", "troubleshoot", "support"],
28
- "End of Conversation": ["bye", "thank you", "thanks", "goodbye", "see you", "later", "end", "close", "sign off"],
29
- "Start of Conversation": ["start", "begin", "hello", "hi", "initiate", "greet", "greeting", "open", "commence"],
30
- "Feedback": ["feedback", "comments", "review", "opinion", "suggestion", "critique", "rating"],
31
- "Online Meetings": ["meeting", "meeting code", "passcode", "join meeting", "zoom", "zoom link"],
32
- "Event Inquiries": ["event", "webinar", "meeting", "conference", "session", "seminar", "workshop", "invitation"],
33
- "Payment Issues": ["payment", "billing", "transaction", "charge", "fee", "invoice", "refund", "receipt"],
34
- "Registration Issues": ["registration", "register", "sign up", "enroll", "join", "signup", "enrollment"],
35
- "Service Requests": ["service", "support", "request", "assistance", "help", "aid", "maintenance"],
36
- "Account Issues": ["account", "profile", "update", "activation", "deactivation", "credentials", "reset"],
37
- "Product Information": ["product", "service", "details", "info", "information", "specifications", "features"],
38
- "Order Status": ["order", "status", "tracking", "shipment", "delivery", "purchase", "dispatch"],
39
- "Miscellaneous": []
40
- }
41
-
42
- def categorize_question(question):
43
- # Split the question into words
44
- words = question.split()
45
-
46
- # Check if the question has only one word
47
- if len(words) == 1:
48
- single_word = words[0].lower()
49
- # Check if the single word is in the Start of Conversation category
50
- if any(single_word in keyword for keyword in categories_keywords["Start of Conversation"]):
51
- return "Start of Conversation"
52
- else:
53
- return "End of Conversation"
54
-
55
- # Categorization of other queries
56
- for category, keywords in categories_keywords.items():
57
- if any(keyword.lower() in question.lower() for keyword in keywords):
58
- return category
59
- return "Miscellaneous"
60
-
61
-
62
-
63
- def preprocess_data(df):
64
- df.rename(columns={'Question Asked': 'texts'}, inplace=True)
65
- df['texts'] = df['texts'].astype(str).str.lower()
66
- df['texts'] = df['texts'].apply(lambda text: re.sub(r'https?://\S+|www\.\S+', '', text))
67
-
68
- def remove_emoji(string):
69
- emoji_pattern = re.compile("["
70
- u"\U0001F600-\U0001F64F"
71
- u"\U0001F300-\U0001F5FF"
72
- u"\U0001F680-\U0001F6FF"
73
- u"\U0001F1E0-\U0001F1FF"
74
- u"\U00002702-\U000027B0"
75
- u"\U000024C2-\U0001F251"
76
- "]+", flags=re.UNICODE)
77
- return emoji_pattern.sub(r'', string)
78
-
79
- df['texts'] = df['texts'].apply(remove_emoji)
80
-
81
- custom_synonyms = {
82
- 'application': ['form'],
83
- 'apply': ['fill', 'applied'],
84
- 'work': ['job'],
85
- 'salary': ['stipend', 'pay', 'payment', 'paid'],
86
- 'test': ['online test', 'amcat test', 'exam', 'assessment'],
87
- 'pass': ['clear', 'selected', 'pass or not'],
88
- 'result': ['outcome', 'mark', 'marks'],
89
- 'thanks': ["thanks a lot to you", "thankyou so much", "thank you so much", "tysm", "thank you",
90
- "okaythank", "thx", "ty", "thankyou", "thank", "thank u"],
91
- 'interview': ["pi"]
92
- }
93
-
94
- for original_word, synonym_list in custom_synonyms.items():
95
- for synonym in synonym_list:
96
- pattern = r"\b" + synonym + r"\b"
97
- df['texts'] = df['texts'].str.replace(pattern, original_word, regex=True)
98
-
99
- spam_list = ["click here", "free", "recharge", "limited", "discount", "money back guarantee", "aaj", "kal", "mein",
100
- "how can i help you", "how can we help you", "how we can help you", "follow", "king", "contacting", "gar",
101
- "kirke", "subscribe", "youtube", "jio", "insta", "make money", "b2b", "sent using truecaller"]
102
-
103
- for spam_phrase in spam_list:
104
- pattern = r"\b" + re.escape(spam_phrase) + r"\b"
105
- df = df[~df['texts'].str.contains(pattern)]
106
-
107
- def remove_punctuations(text):
108
- return re.sub(r'[^\w\s]', '', text)
109
-
110
- df['texts'] = df['texts'].apply(remove_punctuations)
111
- df['texts'] = df['texts'].str.strip()
112
- df = df[df['texts'] != '']
113
-
114
- # Categorize the texts
115
- df['Category'] = df['texts'].apply(categorize_question)
116
-
117
- return df
118
-
119
- def cluster_data(df, num_clusters):
120
- vectorizer = TfidfVectorizer(stop_words='english')
121
- X = vectorizer.fit_transform(df['texts'])
122
-
123
- kmeans = KMeans(n_clusters=num_clusters, random_state=0)
124
- kmeans.fit(X)
125
- df['Cluster'] = kmeans.labels_
126
-
127
- return df, kmeans
128
-
129
- def generate_wordcloud(df):
130
- text = " ".join(df['texts'].tolist())
131
- stopwords = set(STOPWORDS)
132
- wordcloud = WordCloud(
133
- width=800,
134
- height=400,
135
- background_color='white',
136
- max_words=300,
137
- collocations=False,
138
- min_font_size=10,
139
- max_font_size=200,
140
- stopwords=stopwords,
141
- prefer_horizontal=1.0,
142
- scale=2,
143
- relative_scaling=0.5,
144
- random_state=42
145
- ).generate(text)
146
-
147
- plt.figure(figsize=(15, 7))
148
- plt.imshow(wordcloud, interpolation='bilinear')
149
- plt.axis('off')
150
- buf = BytesIO()
151
- plt.savefig(buf, format='png')
152
- buf.seek(0)
153
- img = Image.open(buf)
154
- return img
155
-
156
- def generate_bar_chart(df, num_clusters_to_display):
157
- # Exclude common words from the top words
158
- common_words = {'i', 'you', 'thanks', 'thank', 'ok', 'okay', 'sure', 'done'}
159
-
160
- top_categories = df['Category'].value_counts().index[:num_clusters_to_display]
161
- df_top_categories = df[df['Category'].isin(top_categories)]
162
-
163
- category_top_words = df_top_categories.groupby('Category', observed=False)['texts'].apply(lambda x: ' '.join(x)).reset_index()
164
- category_top_words['top_word'] = category_top_words['texts'].apply(lambda x: ' '.join([word for word in pd.Series(x.split()).value_counts().index if word not in common_words][:3]))
165
- category_sizes = df_top_categories['Category'].value_counts().reset_index()
166
- category_sizes.columns = ['Category', 'Count']
167
- category_sizes = category_sizes.merge(category_top_words[['Category', 'top_word']], on='Category')
168
-
169
- fig = px.bar(category_sizes, x='Category', y='Count', text='top_word', title='Category Frequency with Top Words')
170
- fig.update_traces(textposition='outside')
171
- fig.update_layout(xaxis_title='Category', yaxis_title='Frequency', showlegend=False)
172
-
173
- buf = BytesIO()
174
- fig.write_image(buf, format='png')
175
- buf.seek(0)
176
- img = Image.open(buf)
177
- return img
178
-
179
- def main(file, num_clusters_to_display):
180
- try:
181
- df = pd.read_csv(file)
182
-
183
- # Filter by 'Fallback Message shown'
184
- df = df[df['Answer'] == 'Fallback Message shown']
185
-
186
- df = preprocess_data(df)
187
-
188
- # Get category sizes and sort by size in ascending order
189
- category_sizes = df['Category'].value_counts().reset_index()
190
- category_sizes.columns = ['Category', 'Count']
191
- sorted_categories = category_sizes.sort_values(by='Count', ascending=True)['Category'].tolist()
192
-
193
- # Get the largest x categories as specified by num_clusters_to_display
194
- largest_categories = sorted_categories[:num_clusters_to_display]
195
-
196
- # Filter the dataframe to include only the largest categories
197
- filtered_df = df[df['Category'].isin(largest_categories)]
198
-
199
- # Sort the dataframe by Category
200
- filtered_df = filtered_df.sort_values(by='Category')
201
-
202
- wordcloud_img = generate_wordcloud(filtered_df)
203
- bar_chart_img = generate_bar_chart(filtered_df, num_clusters_to_display)
204
-
205
- with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as tmpfile:
206
- filtered_df.to_csv(tmpfile.name, index=False)
207
- csv_file_path = tmpfile.name
208
-
209
- return csv_file_path, wordcloud_img, bar_chart_img
210
- except Exception as e:
211
- print(f"Error: {e}")
212
- return str(e), None, None
213
-
214
- interface = gr.Interface(
215
- fn=main,
216
- inputs=[
217
- gr.File(label="Upload CSV File (.csv)"),
218
- gr.Slider(label="Number of Categories to Display", minimum=1, maximum=15, step=1, value=5)
219
- ],
220
- outputs=[
221
- gr.File(label="Categorized Data CSV"),
222
- gr.Image(label="Word Cloud"),
223
- gr.Image(label="Bar Chart")
224
- ],
225
- title="Unanswered User Queries Categorization",
226
- description="Categorize unanswered user queries into predefined categories"
227
- )
228
-
229
- interface.launch(share=True)
230
-
231
-
232
- In this code, why does the selection of 'number of categories' (slider) affect the quality of the output? I want this slider to ONLY change the number of categories visible, not the actual categorization itself. However, when this slider is changed, it does not accurately display only the x largest categories. Additionally, the bar graph and the word cloud should be INDEPENDENT of this slider and should act on the data as a whole regardless of the slider.
233
- ChatGPT
234
- Memory updated
235
- The issue you're experiencing with the slider affecting the output is due to the way the filtering is implemented before generating the visualizations. To ensure that the slider only affects the number of categories displayed and not the actual categorization or the visualizations, we need to modify the code accordingly.
236
-
237
- Here’s the updated code:
238
-
239
- python
240
- Copy code
241
- import gradio as gr
242
- import pandas as pd
243
- from sklearn.feature_extraction.text import TfidfVectorizer
244
- from sklearn.cluster import KMeans
245
- import re
246
- from io import BytesIO
247
- import tempfile
248
- from wordcloud import WordCloud, STOPWORDS
249
- import matplotlib.pyplot as plt
250
- import plotly.express as px
251
- from PIL import Image
252
-
253
- categories_keywords = {
254
- "Application Status": ["application status", "application", "status", "submitted", "processing", "pending", "approval", "rejected", "accepted"],
255
- "Volunteering": ["volunteer", "volunteering", "help out", "assist", "volunteer work", "volunteer opportunities"],
256
- "Certificates": ["certificate", "certificates", "completion", "certification", "accreditation", "proof", "document", "certified"],
257
- "Job Opportunities": ["job", "opportunity", "career", "vacancy", "position", "employment", "hiring", "recruitment", "internship"],
258
- "Surveys and Forms": ["survey", "form", "forms", "questionnaire", "feedback form", "response", "fill out", "submission"],
259
- "General Queries": ["hello", "hi", "help", "general", "query", "question", "info", "information", "inquiry", "ask"],
260
- "Spam": ["spam", "subscribe", "remove", "stop", "junk", "block", "opt-out", "watch my video", "click on this link", "win prize"],
261
- "Rescheduling and Postponing": ["reschedule", "postpone", "delay", "change date", "new time", "rearrange", "shift", "adjust timing"],
262
- "Contact and Communication Issues": ["contact", "communicate", "communication", "reach out", "phone", "email", "address", "details"],
263
- "Email and Credentials Issues": ["email", "credentials", "login", "password", "access", "username", "account", "verification", "reset"],
264
- "Timing and Scheduling": ["timing", "schedule", "scheduling", "time", "appointment", "availability", "calendar", "book", "slot"],
265
- "Salary and Benefits": ["salary", "benefits", "pay", "compensation", "wages", "earnings", "package", "remuneration", "incentives"],
266
- "Technical Issues": ["technical", "issue", "problem", "error", "bug", "glitch", "fix", "troubleshoot", "support"],
267
- "End of Conversation": ["bye", "thank you", "thanks", "goodbye", "see you", "later", "end", "close", "sign off"],
268
- "Start of Conversation": ["start", "begin", "hello", "hi", "initiate", "greet", "greeting", "open", "commence"],
269
- "Feedback": ["feedback", "comments", "review", "opinion", "suggestion", "critique", "rating"],
270
- "Online Meetings": ["meeting", "meeting code", "passcode", "join meeting", "zoom", "zoom link"],
271
- "Event Inquiries": ["event", "webinar", "meeting", "conference", "session", "seminar", "workshop", "invitation"],
272
- "Payment Issues": ["payment", "billing", "transaction", "charge", "fee", "invoice", "refund", "receipt"],
273
- "Registration Issues": ["registration", "register", "sign up", "enroll", "join", "signup", "enrollment"],
274
- "Service Requests": ["service", "support", "request", "assistance", "help", "aid", "maintenance"],
275
- "Account Issues": ["account", "profile", "update", "activation", "deactivation", "credentials", "reset"],
276
- "Product Information": ["product", "service", "details", "info", "information", "specifications", "features"],
277
- "Order Status": ["order", "status", "tracking", "shipment", "delivery", "purchase", "dispatch"],
278
- "Miscellaneous": []
279
- }
280
-
281
- def categorize_question(question):
282
- words = question.split()
283
- if len(words) == 1:
284
- single_word = words[0].lower()
285
- if any(single_word in keyword for keyword in categories_keywords["Start of Conversation"]):
286
- return "Start of Conversation"
287
- else:
288
- return "End of Conversation"
289
-
290
- for category, keywords in categories_keywords.items():
291
- if any(keyword.lower() in question.lower() for keyword in keywords):
292
- return category
293
- return "Miscellaneous"
294
-
295
- def preprocess_data(df):
296
- df.rename(columns={'Question Asked': 'texts'}, inplace=True)
297
- df['texts'] = df['texts'].astype(str).str.lower()
298
- df['texts'] = df['texts'].apply(lambda text: re.sub(r'https?://\S+|www\.\S+', '', text))
299
-
300
- def remove_emoji(string):
301
- emoji_pattern = re.compile("["
302
- u"\U0001F600-\U0001F64F"
303
- u"\U0001F300-\U0001F5FF"
304
- u"\U0001F680-\U0001F6FF"
305
- u"\U0001F1E0-\U0001F1FF"
306
- u"\U00002702-\U000027B0"
307
- u"\U000024C2-\U0001F251"
308
- "]+", flags=re.UNICODE)
309
- return emoji_pattern.sub(r'', string)
310
-
311
- df['texts'] = df['texts'].apply(remove_emoji)
312
-
313
- custom_synonyms = {
314
- 'application': ['form'],
315
- 'apply': ['fill', 'applied'],
316
- 'work': ['job'],
317
- 'salary': ['stipend', 'pay', 'payment', 'paid'],
318
- 'test': ['online test', 'amcat test', 'exam', 'assessment'],
319
- 'pass': ['clear', 'selected', 'pass or not'],
320
- 'result': ['outcome', 'mark', 'marks'],
321
- 'thanks': ["thanks a lot to you", "thankyou so much", "thank you so much", "tysm", "thank you",
322
- "okaythank", "thx", "ty", "thankyou", "thank", "thank u"],
323
- 'interview': ["pi"]
324
- }
325
-
326
- for original_word, synonym_list in custom_synonyms.items():
327
- for synonym in synonym_list:
328
- pattern = r"\b" + synonym + r"\b"
329
- df['texts'] = df['texts'].str.replace(pattern, original_word, regex=True)
330
-
331
- spam_list = ["click here", "free", "recharge", "limited", "discount", "money back guarantee", "aaj", "kal", "mein",
332
- "how can i help you", "how can we help you", "how we can help you", "follow", "king", "contacting", "gar",
333
- "kirke", "subscribe", "youtube", "jio", "insta", "make money", "b2b", "sent using truecaller"]
334
-
335
- for spam_phrase in spam_list:
336
- pattern = r"\b" + re.escape(spam_phrase) + r"\b"
337
- df = df[~df['texts'].str.contains(pattern)]
338
-
339
- def remove_punctuations(text):
340
- return re.sub(r'[^\w\s]', '', text)
341
-
342
- df['texts'] = df['texts'].apply(remove_punctuations)
343
- df['texts'] = df['texts'].str.strip()
344
- df = df[df['texts'] != '']
345
-
346
- df['Category'] = df['texts'].apply(categorize_question)
347
-
348
- return df
349
-
350
- def cluster_data(df, num_clusters):
351
- vectorizer = TfidfVectorizer(stop_words='english')
352
- X = vectorizer.fit_transform(df['texts'])
353
-
354
- kmeans = KMeans(n_clusters=num_clusters, random_state=0)
355
- kmeans.fit(X)
356
- df['Cluster'] = kmeans.labels_
357
-
358
- return df, kmeans
359
-
360
- def generate_wordcloud(df):
361
- text = " ".join(df['texts'].tolist())
362
- stopwords = set(STOPWORDS)
363
- wordcloud = WordCloud(
364
- width=800,
365
- height=400,
366
- background_color='white',
367
- max_words=300,
368
- collocations=False,
369
- min_font_size=10,
370
- max_font_size=200,
371
- stopwords=stopwords,
372
- prefer_horizontal=1.0,
373
- scale=2,
374
- relative_scaling=0.5,
375
- random_state=42
376
- ).generate(text)
377
-
378
- plt.figure(figsize=(15, 7))
379
- plt.imshow(wordcloud, interpolation='bilinear')
380
- plt.axis('off')
381
- buf = BytesIO()
382
- plt.savefig(buf, format='png')
383
- buf.seek(0)
384
- img = Image.open(buf)
385
- return img
386
-
387
- def generate_bar_chart(df, num_clusters_to_display):
388
- common_words = {'i', 'you', 'thanks', 'thank', 'ok', 'okay', 'sure', 'done'}
389
-
390
- top_categories = df['Category'].value_counts().index[:num_clusters_to_display]
391
- df_top_categories = df[df['Category'].isin(top_categories)]
392
-
393
- category_top_words = df_top_categories.groupby('Category', observed=False)['texts'].apply(lambda x: ' '.join(x)).reset_index()
394
- category_top_words['top_word'] = category_top_words['texts'].apply(lambda x: ' '.join([word for word in pd.Series(x.split()).value_counts().index if word not in common_words][:3]))
395
- category_sizes = df_top_categories['Category'].value_counts().reset_index()
396
- category_sizes.columns = ['Category', 'Count']
397
- category_sizes = category_sizes.merge(category_top_words[['Category', 'top_word']], on='Category')
398
-
399
- fig = px.bar(category_sizes, x='Category', y='Count', text='top_word', title='Category Frequency with Top Words')
400
- fig.update_traces(textposition='outside')
401
- fig.update_layout(xaxis_title='Category', yaxis_title='Frequency', showlegend=False)
402
-
403
- buf = BytesIO()
404
- fig.write_image(buf, format='png')
405
- buf.seek(0)
406
- img = Image.open(buf)
407
- return img
408
-
409
- def main(file, num_clusters_to_display):
410
- try:
411
- df = pd.read_csv(file)
412
-
413
- df = df[df['Answer'] == 'Fallback Message shown']
414
-
415
- df = preprocess_data(df)
416
-
417
- wordcloud_img = generate_wordcloud(df)
418
- bar_chart_img = generate_bar_chart(df, num_clusters_to_display)
419
-
420
- with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as tmpfile:
421
- df.to_csv(tmpfile.name, index=False)
422
- csv_file_path = tmpfile.name
423
-
424
- return csv_file_path, wordcloud_img, bar_chart_img
425
- except Exception as e:
426
- print(f"Error: {e}")
427
- return str(e), None, None
428
-
429
- interface = gr.Interface(
430
- fn=main,
431
- inputs=[
432
- gr.File(label="Upload CSV File (.csv)"),
433
- gr.Slider(label="Number of Categories to Display", minimum=1, maximum=15, step=1, value=5)
434
- ],
435
- outputs=[
436
- gr.File(label="Categorized Data CSV"),
437
- gr.Image(label="Word Cloud"),
438
- gr.Image(label="Bar Chart")
439
- ],
440
- title="Unanswered User Queries Categorization",
441
- description="Categorize unanswered user queries into predefined categories"
442
- )
443
-
444
- interface.launch(share=True)
445
- Changes made:
446
-
447
- Categorization: The preprocess_data function now categorizes the data without any filtering based on the slider value.
448
- Visualizations: The word cloud and bar chart functions use the entire categorized data to generate the visuals.
449
- Filtering: The filtering based on the slider value is only applied within the generate_bar_chart function to limit the number of categories displayed in the bar chart.
450
- With these changes, the slider will only affect the number of categories displayed in the bar chart, and the word cloud will be generated using the entire dataset regardless of the slider value. The CSV output will also include all categorized data.
451
-
452
- Now the bar chart works as intended, but the word cloud doesn't. Can we restore the old word cloud mechanism while still maintaining the new bar chart functionality, so both work as intended?
453
- ChatGPT
454
- Sure, we can restore the old word cloud mechanism to ensure it functions as intended while keeping the new bar chart functionality. Here is the updated code:
455
-
456
- python
457
- Copy code
458
- import gradio as gr
459
- import pandas as pd
460
- from sklearn.feature_extraction.text import TfidfVectorizer
461
- from sklearn.cluster import KMeans
462
- import re
463
- from io import BytesIO
464
- import tempfile
465
- from wordcloud import WordCloud, STOPWORDS
466
- import matplotlib.pyplot as plt
467
- import plotly.express as px
468
- from PIL import Image
469
-
470
  categories_keywords = {
471
  "Application Status": ["application status", "application", "status", "submitted", "processing", "pending", "approval", "rejected", "accepted"],
472
  "Volunteering": ["volunteer", "volunteering", "help out", "assist", "volunteer work", "volunteer opportunities"],
@@ -626,7 +169,7 @@ def generate_bar_chart(df, num_clusters_to_display):
626
  img = Image.open(buf)
627
  return img
628
 
629
- def main(file, num_categories_to_display):
630
  try:
631
  df = pd.read_csv(file)
632
 
@@ -634,19 +177,16 @@ def main(file, num_categories_to_display):
634
  df = df[df['Answer'] == 'Fallback Message shown']
635
 
636
  df = preprocess_data(df)
637
-
638
- df = df[df['Category'] != 'Miscellaneous']
639
 
640
- # Get category sizes and sort by size in descending order
641
  category_sizes = df['Category'].value_counts().reset_index()
642
  category_sizes.columns = ['Category', 'Count']
643
  sorted_categories = category_sizes.sort_values(by='Count', ascending=False)['Category'].tolist()
644
  sorted_categories_sm = category_sizes.sort_values(by='Count', ascending=True)['Category'].tolist()
645
-
646
 
647
- # Get the largest x categories as specified by num_categories_to_display
648
- largest_categories = sorted_categories[:num_categories_to_display]
649
- smallest_categories = sorted_categories_sm[:num_categories_to_display]
650
 
651
  # Filter the dataframe to include only the largest categories
652
  filtered_df = df[df['Category'].isin(largest_categories)]
@@ -656,8 +196,8 @@ def main(file, num_categories_to_display):
656
  filtered_df = filtered_df.sort_values(by='Category')
657
  filtered_cloud_df = filtered_cloud_df.sort_values(by='Category')
658
 
659
- wordcloud_img = generate_wordcloud(filtered_cloud_df) # Word cloud based on the entire dataframe
660
- bar_chart_img = generate_bar_chart(df, num_categories_to_display) # Bar chart based on the filtered dataframe
661
 
662
  with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as tmpfile:
663
  filtered_df.to_csv(tmpfile.name, index=False)
@@ -668,8 +208,6 @@ def main(file, num_categories_to_display):
668
  print(f"Error: {e}")
669
  return str(e), None, None
670
 
671
-
672
-
673
  interface = gr.Interface(
674
  fn=main,
675
  inputs=[
@@ -686,3 +224,4 @@ interface = gr.Interface(
686
  )
687
 
688
  interface.launch(share=True)
 
 
10
  import plotly.express as px
11
  from PIL import Image
12
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  categories_keywords = {
14
  "Application Status": ["application status", "application", "status", "submitted", "processing", "pending", "approval", "rejected", "accepted"],
15
  "Volunteering": ["volunteer", "volunteering", "help out", "assist", "volunteer work", "volunteer opportunities"],
 
169
  img = Image.open(buf)
170
  return img
171
 
172
+ def main(file, num_clusters_to_display):
173
  try:
174
  df = pd.read_csv(file)
175
 
 
177
  df = df[df['Answer'] == 'Fallback Message shown']
178
 
179
  df = preprocess_data(df)
 
 
180
 
181
+ # Get category sizes and sort by size in ascending order
182
  category_sizes = df['Category'].value_counts().reset_index()
183
  category_sizes.columns = ['Category', 'Count']
184
  sorted_categories = category_sizes.sort_values(by='Count', ascending=False)['Category'].tolist()
185
  sorted_categories_sm = category_sizes.sort_values(by='Count', ascending=True)['Category'].tolist()
 
186
 
187
+ # Get the largest x categories as specified by num_clusters_to_display
188
+ largest_categories = sorted_categories[:num_clusters_to_display]
189
+ smallest_categories = sorted_categories[:num_clusters_to_display]
190
 
191
  # Filter the dataframe to include only the largest categories
192
  filtered_df = df[df['Category'].isin(largest_categories)]
 
196
  filtered_df = filtered_df.sort_values(by='Category')
197
  filtered_cloud_df = filtered_cloud_df.sort_values(by='Category')
198
 
199
+ wordcloud_img = generate_wordcloud(filtered_cloud_df)
200
+ bar_chart_img = generate_bar_chart(filtered_df, num_clusters_to_display)
201
 
202
  with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as tmpfile:
203
  filtered_df.to_csv(tmpfile.name, index=False)
 
208
  print(f"Error: {e}")
209
  return str(e), None, None
210
 
 
 
211
  interface = gr.Interface(
212
  fn=main,
213
  inputs=[
 
224
  )
225
 
226
  interface.launch(share=True)
227
+