tanish78 committed on
Commit
a570ee2
·
verified ·
1 Parent(s): ae6433b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +123 -45
app.py CHANGED
@@ -2,12 +2,14 @@ import gradio as gr
2
  import pandas as pd
3
  from sklearn.feature_extraction.text import TfidfVectorizer
4
  from sklearn.cluster import KMeans
5
- from sklearn.preprocessing import normalize
6
- from wordcloud import WordCloud
7
  import matplotlib.pyplot as plt
8
- from io import BytesIO
9
  import re
 
10
  import tempfile
 
 
 
11
 
12
  def preprocess_data(df):
13
  df.rename(columns={'Question Asked': 'texts'}, inplace=True)
@@ -15,6 +17,19 @@ def preprocess_data(df):
15
  df['texts'] = df['texts'].str.lower()
16
  df['texts'] = df['texts'].apply(lambda text: re.sub(r'https?://\S+|www\.\S+', '', text))
17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  custom_synonyms = {
19
  'application': ['form'],
20
  'apply': ['fill', 'applied'],
@@ -35,73 +50,136 @@ def preprocess_data(df):
35
  pattern = r"\b" + synonym + r"\s+you" + r"\b(?!\s*\()"
36
  df['texts'] = df['texts'].str.replace(pattern, original_word + ' ', regex=True)
37
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  return df
39
 
40
  def cluster_data(df, num_clusters):
41
  vectorizer = TfidfVectorizer(stop_words='english')
42
  X = vectorizer.fit_transform(df['texts'])
43
- X = normalize(X)
44
 
45
  kmeans = KMeans(n_clusters=num_clusters, random_state=0)
46
  kmeans.fit(X)
47
  df['Cluster'] = kmeans.labels_
48
 
49
- return df
 
 
 
 
 
50
 
51
- def generate_wordcloud(texts):
52
- wordcloud = WordCloud(width=800, height=400, background_color='white').generate(" ".join(texts))
 
53
  plt.figure(figsize=(10, 5))
54
  plt.imshow(wordcloud, interpolation='bilinear')
55
  plt.axis('off')
56
  buf = BytesIO()
57
  plt.savefig(buf, format='png')
58
  buf.seek(0)
59
- return buf
60
-
61
- def main(file, num_clusters, num_clusters_to_display):
62
- df = pd.read_csv(file)
63
-
64
- # Filter by 'Fallback Message shown'
65
- df = df[df['Answer'] == 'Fallback Message shown']
66
-
67
- df = preprocess_data(df)
68
- df = cluster_data(df, num_clusters)
69
-
70
- cluster_sizes = df['Cluster'].value_counts()
71
- sorted_clusters = cluster_sizes.index.tolist()
72
- df['Cluster'] = pd.Categorical(df['Cluster'], categories=sorted_clusters, ordered=True)
73
- df = df.sort_values('Cluster')
74
-
75
- # Filter out the largest cluster and get the next largest clusters
76
- largest_cluster = sorted_clusters[0]
77
- filtered_clusters = sorted_clusters[1:num_clusters_to_display+1]
78
-
79
- df = df[df['Cluster'].isin(filtered_clusters)]
80
- df['Cluster'] = pd.Categorical(df['Cluster'], categories=filtered_clusters, ordered=True)
81
- df = df.sort_values('Cluster')
82
-
83
- wordclouds = []
84
- for cluster in filtered_clusters:
85
- texts = df[df['Cluster'] == cluster]['texts'].tolist()
86
- wordcloud_image = generate_wordcloud(texts)
87
- wordclouds.append((f"Cluster {cluster}", wordcloud_image))
88
-
89
- with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as tmpfile:
90
- df.to_csv(tmpfile.name, index=False)
91
- csv_file_path = tmpfile.name
92
-
93
- return wordclouds, csv_file_path
 
94
 
95
  interface = gr.Interface(
96
  fn=main,
97
  inputs=[
98
  gr.File(label="Upload CSV File (.csv)"),
99
- gr.Slider(label="Number of Clusters", minimum=2, maximum=20, step=1, value=5),
100
  gr.Slider(label="Number of Categories to Display", minimum=1, maximum=10, step=1, value=5)
101
  ],
102
  outputs=[
103
- gr.Gallery(label="Word Clouds of Clusters"),
104
- gr.File(label="Clustered Data CSV")
105
  ],
106
  title="Unanswered User Queries Clustering",
107
  description="Unanswered User Query Categorization"
 
2
  import pandas as pd
3
  from sklearn.feature_extraction.text import TfidfVectorizer
4
  from sklearn.cluster import KMeans
 
 
5
  import matplotlib.pyplot as plt
6
+ from sklearn.decomposition import PCA
7
  import re
8
+ from io import BytesIO
9
  import tempfile
10
+ import numpy as np
11
+ from PIL import Image
12
+ from wordcloud import WordCloud
13
 
14
  def preprocess_data(df):
15
  df.rename(columns={'Question Asked': 'texts'}, inplace=True)
 
17
  df['texts'] = df['texts'].str.lower()
18
  df['texts'] = df['texts'].apply(lambda text: re.sub(r'https?://\S+|www\.\S+', '', text))
19
 
20
+ def remove_emoji(string):
21
+ emoji_pattern = re.compile("["
22
+ u"\U0001F600-\U0001F64F"
23
+ u"\U0001F300-\U0001F5FF"
24
+ u"\U0001F680-\U0001F6FF"
25
+ u"\U0001F1E0-\U0001F1FF"
26
+ u"\U00002702-\U000027B0"
27
+ u"\U000024C2-\U0001F251"
28
+ "]+", flags=re.UNICODE)
29
+ return emoji_pattern.sub(r'', string) if isinstance(string, str) else string
30
+
31
+ df['texts'] = df['texts'].apply(remove_emoji)
32
+
33
  custom_synonyms = {
34
  'application': ['form'],
35
  'apply': ['fill', 'applied'],
 
50
  pattern = r"\b" + synonym + r"\s+you" + r"\b(?!\s*\()"
51
  df['texts'] = df['texts'].str.replace(pattern, original_word + ' ', regex=True)
52
 
53
+ spam_list = ["click here", "free", "recharge", "limited", "discount", "money back guarantee", "aaj", "kal", "mein",
54
+ "how can i help you", "how can we help you", "how we can help you", "follow", "king", "contacting", "gar",
55
+ "kirke", "subscribe", "youtube", "jio", "insta", "make money", "b2b","sent using truecaller"]
56
+
57
+ rows_to_remove = set()
58
+ for spam_phrase in spam_list:
59
+ pattern = r"\b" + re.escape(spam_phrase) + r"\b"
60
+ spam_rows = df['texts'].str.contains(pattern)
61
+ rows_to_remove.update(df.index[spam_rows].tolist())
62
+
63
+ df = df.drop(rows_to_remove)
64
+
65
+ greet_variations = ["hello", "hy", "hey", "hii", "hi", "heyyy", "bie", "bye"]
66
+ for greet_var in greet_variations:
67
+ pattern = r"(?<!\S)" + greet_var + r"(?!\S)|\b" + greet_var + r"\b"
68
+ df['texts'] = df['texts'].str.replace(pattern, '', regex=True)
69
+
70
+ okay_variations = ["ok", "k", "kay", "okay", "okie", "kk", "ohhhk","t","r"]
71
+ for okay_var in okay_variations:
72
+ pattern = r"(?<!\S)" + okay_var + r"(?!\S)|\b" + okay_var + r"\b"
73
+ df['texts'] = df['texts'].str.replace(pattern, '', regex=True)
74
+
75
+ yes_variations = ["yes", "yeah", "yep", "yup", "yuh", "ya", "yes got it", "yeah it is", "yesss", "yea","no"]
76
+ for yes_var in yes_variations:
77
+ pattern = r"(?<!\S)" + yes_var + r"(?!\S)|\b" + yes_var + r"\b"
78
+ df['texts'] = df['texts'].str.replace(pattern, '', regex=True)
79
+
80
+ remove_phrases = ["i'm all set","ask a question","apply the survey","videos (2-8 min)","long reads (> 8 min)",
81
+ "short reads (3-8 min)","not a student alumni","mock","share feedback","bite size (< 2 min)",
82
+ "actually no","next steps","i'm a student alumni","i have questions"]
83
+
84
+ for phrase in remove_phrases:
85
+ df['texts'] = df['texts'].str.replace(phrase, '')
86
+
87
+ general_variations = ["good morning", "good evening", "good afternoon", "good night", "done", "sorry", "top", "query",
88
+ "stop", "sir", "sure", "oh", "wow", "aaa", "maam", "mam", "ma&#39;am","i'm all set","ask a question","apply the survey",
89
+ "videos (2-8 min)","long reads (> 8 min)","short reads (3-8 min)","not a student alumni","mock","share feedback","bite size (< 2 min)",
90
+ "actually no","next steps","i'm a student alumni","i have questions"]
91
+ for gen_var in general_variations:
92
+ pattern = r"(?<!\S)" + gen_var + r"(?!\S)|\b" + gen_var + r"\b(?=\W|$)"
93
+ df['texts'] = df['texts'].str.replace(pattern, '', regex=True)
94
+
95
+ def remove_punctuations(text):
96
+ return re.sub(r'[^\w\s]', '', text)
97
+ df['texts'] = df['texts'].apply(remove_punctuations)
98
+
99
+ remove_morephrases = ["short reads 38 min","bite size 2 min","videos 28 min","long reads 8 min"]
100
+
101
+ for phrase in remove_morephrases:
102
+ df['texts'] = df['texts'].str.replace(phrase, '')
103
+
104
+ df = df[~df['texts'].str.contains(r'\b\d{10}\b')]
105
+
106
+ df['texts'] = df['texts'].str.strip()
107
+
108
+ df['texts'] = df['texts'].apply(lambda x: x.strip())
109
+ df = df[df['texts'] != '']
110
+
111
  return df
112
 
113
def cluster_data(df, num_clusters):
    """Cluster preprocessed texts with TF-IDF + KMeans and attach 2-D PCA coords.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a non-empty string column ``'texts'``.
    num_clusters : int
        Requested number of clusters. Capped at the number of rows so
        KMeans cannot raise ``ValueError`` on small filtered datasets.

    Returns
    -------
    tuple
        ``(df, X, kmeans)`` — the frame with ``'Cluster'``, ``'PCA1'`` and
        ``'PCA2'`` columns added, the sparse TF-IDF matrix, and the fitted
        KMeans estimator.
    """
    vectorizer = TfidfVectorizer(stop_words='english')
    X = vectorizer.fit_transform(df['texts'])

    # Robustness: KMeans raises if n_clusters exceeds the sample count,
    # which can happen after the aggressive row filtering upstream.
    effective_clusters = min(num_clusters, X.shape[0])

    # n_init pinned explicitly: newer scikit-learn changed the default to
    # 'auto' (with a FutureWarning on older versions); 10 reproduces the
    # legacy behavior deterministically together with random_state=0.
    kmeans = KMeans(n_clusters=effective_clusters, random_state=0, n_init=10)
    kmeans.fit(X)
    df['Cluster'] = kmeans.labels_

    # PCA requires a dense array; acceptable for modest corpora, but
    # memory-heavy for very large ones — TODO confirm expected data size.
    pca = PCA(n_components=2)
    principal_components = pca.fit_transform(X.toarray())
    df['PCA1'] = principal_components[:, 0]
    df['PCA2'] = principal_components[:, 1]

    return df, X, kmeans
127
 
128
def generate_wordcloud(df):
    """Render one word cloud over all rows of ``df['texts']``.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame whose ``'texts'`` column holds the (already cleaned) strings.

    Returns
    -------
    PIL.Image.Image
        The rendered word cloud, fully loaded into memory.
    """
    text = " ".join(df['texts'].tolist())
    wordcloud = WordCloud(width=800, height=400, background_color='white').generate(text)

    fig = plt.figure(figsize=(10, 5))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis('off')

    buf = BytesIO()
    plt.savefig(buf, format='png')
    # Bug fix: the figure was never closed, so repeated calls leaked
    # matplotlib figures and memory grew per request.
    plt.close(fig)

    buf.seek(0)
    img = Image.open(buf)
    # Image.open is lazy; force the pixel data in now so the returned image
    # no longer depends on the local buffer staying open.
    img.load()
    return img
139
+
140
def main(file, num_clusters_to_display):
    """Gradio entry point: cluster unanswered queries and report the results.

    Parameters
    ----------
    file : file-like or path
        Uploaded CSV; must contain 'Answer' and 'Question Asked' columns.
    num_clusters_to_display : int
        How many of the largest clusters (after dropping the single largest
        one) to keep in the output.

    Returns
    -------
    tuple
        ``(csv_file_path, wordcloud_image)`` on success, or
        ``(error_message, None)`` on failure so the UI shows the error
        instead of crashing the app.
    """
    try:
        df = pd.read_csv(file)

        # Keep only the queries the bot failed to answer.
        df = df[df['Answer'] == 'Fallback Message shown']

        df = preprocess_data(df)
        # X / kmeans are part of cluster_data's contract but unused here.
        df, _X, _kmeans = cluster_data(df, num_clusters=15)

        # Clusters ordered by size, largest first.
        sorted_clusters = df['Cluster'].value_counts().index.tolist()

        # Drop the single largest cluster (presumably catch-all noise —
        # TODO confirm) and keep the next num_clusters_to_display largest.
        # The original code also built an intermediate Categorical over all
        # clusters and sorted, but that result was immediately overwritten
        # below, so the redundant pass is removed.
        filtered_clusters = sorted_clusters[1:num_clusters_to_display + 1]

        df = df[df['Cluster'].isin(filtered_clusters)]
        df['Cluster'] = pd.Categorical(df['Cluster'], categories=filtered_clusters, ordered=True)
        df = df.sort_values('Cluster')

        wordcloud_img = generate_wordcloud(df)

        # delete=False is deliberate: gradio serves this file to the browser
        # after the function returns, so it must outlive the handler.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as tmpfile:
            df.to_csv(tmpfile.name, index=False)
            csv_file_path = tmpfile.name

        return csv_file_path, wordcloud_img
    except Exception as e:
        # Best-effort top-level boundary: log and surface the message in the UI.
        print(f"Error: {e}")
        return str(e), None
173
 
174
  interface = gr.Interface(
175
  fn=main,
176
  inputs=[
177
  gr.File(label="Upload CSV File (.csv)"),
 
178
  gr.Slider(label="Number of Categories to Display", minimum=1, maximum=10, step=1, value=5)
179
  ],
180
  outputs=[
181
+ gr.File(label="Clustered Data CSV"),
182
+ gr.Image(label="Word Cloud")
183
  ],
184
  title="Unanswered User Queries Clustering",
185
  description="Unanswered User Query Categorization"