tanish78 committed on
Commit
2d3b7b6
·
verified ·
1 Parent(s): 52378d9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -108
app.py CHANGED
@@ -2,113 +2,19 @@ import gradio as gr
2
  import pandas as pd
3
  from sklearn.feature_extraction.text import TfidfVectorizer
4
  from sklearn.cluster import KMeans
 
5
  import matplotlib.pyplot as plt
6
  from sklearn.decomposition import PCA
7
  import re
8
  from io import BytesIO
9
  import tempfile
 
10
 
11
  def preprocess_data(df):
12
- df.rename(columns={'Question Asked': 'texts'}, inplace=True)
13
- df['texts'] = df['texts'].astype(str)
14
- df['texts'] = df['texts'].str.lower()
15
- df['texts'] = df['texts'].apply(lambda text: re.sub(r'https?://\S+|www\.\S+', '', text))
16
-
17
- def remove_emoji(string):
18
- emoji_pattern = re.compile("["
19
- u"\U0001F600-\U0001F64F"
20
- u"\U0001F300-\U0001F5FF"
21
- u"\U0001F680-\U0001F6FF"
22
- u"\U0001F1E0-\U0001F1FF"
23
- u"\U00002702-\U000027B0"
24
- u"\U000024C2-\U0001F251"
25
- "]+", flags=re.UNICODE)
26
- return emoji_pattern.sub(r'', string) if isinstance(string, str) else string
27
-
28
- df['texts'] = df['texts'].apply(remove_emoji)
29
-
30
- custom_synonyms = {
31
- 'application': ['form'],
32
- 'apply': ['fill', 'applied'],
33
- 'work': ['job'],
34
- 'salary': ['stipend', 'pay', 'payment', 'paid'],
35
- 'test': ['online test', 'amcat test', 'exam', 'assessment'],
36
- 'pass': ['clear', 'selected', 'pass or not'],
37
- 'result': ['outcome', 'mark', 'marks'],
38
- 'thanks': ["thanks a lot to you", "thankyou so much", "thank you so much", "tysm", "thank you",
39
- "okaythank", "thx", "ty", "thankyou", "thank", "thank u"],
40
- 'interview': ["pi"]
41
- }
42
-
43
- for original_word, synonym_list in custom_synonyms.items():
44
- for synonym in synonym_list:
45
- pattern = r"\b" + synonym + r"\b(?!\s*\()"
46
- df['texts'] = df['texts'].str.replace(pattern, original_word, regex=True)
47
- pattern = r"\b" + synonym + r"\s+you" + r"\b(?!\s*\()"
48
- df['texts'] = df['texts'].str.replace(pattern, original_word + ' ', regex=True)
49
-
50
- spam_list = ["click here", "free", "recharge", "limited", "discount", "money back guarantee", "aaj", "kal", "mein",
51
- "how can i help you", "how can we help you", "how we can help you", "follow", "king", "contacting", "gar",
52
- "kirke", "subscribe", "youtube", "jio", "insta", "make money", "b2b","sent using truecaller"]
53
-
54
- rows_to_remove = set()
55
- for spam_phrase in spam_list:
56
- pattern = r"\b" + re.escape(spam_phrase) + r"\b"
57
- spam_rows = df['texts'].str.contains(pattern)
58
- rows_to_remove.update(df.index[spam_rows].tolist())
59
-
60
- df = df.drop(rows_to_remove)
61
-
62
- greet_variations = ["hello", "hy", "hey", "hii", "hi", "heyyy", "bie", "bye"]
63
- for greet_var in greet_variations:
64
- pattern = r"(?<!\S)" + greet_var + r"(?!\S)|\b" + greet_var + r"\b"
65
- df['texts'] = df['texts'].str.replace(pattern, '', regex=True)
66
-
67
- okay_variations = ["ok", "k", "kay", "okay", "okie", "kk", "ohhhk","t","r"]
68
- for okay_var in okay_variations:
69
- pattern = r"(?<!\S)" + okay_var + r"(?!\S)|\b" + okay_var + r"\b"
70
- df['texts'] = df['texts'].str.replace(pattern, '', regex=True)
71
-
72
- yes_variations = ["yes", "yeah", "yep", "yup", "yuh", "ya", "yes got it", "yeah it is", "yesss", "yea","no"]
73
- for yes_var in yes_variations:
74
- pattern = r"(?<!\S)" + yes_var + r"(?!\S)|\b" + yes_var + r"\b"
75
- df['texts'] = df['texts'].str.replace(pattern, '', regex=True)
76
-
77
- remove_phrases = ["i'm all set","ask a question","apply the survey","videos (2-8 min)","long reads (> 8 min)",
78
- "short reads (3-8 min)","not a student alumni","mock","share feedback","bite size (< 2 min)",
79
- "actually no","next steps","i'm a student alumni","i have questions"]
80
-
81
- for phrase in remove_phrases:
82
- df['texts'] = df['texts'].str.replace(phrase, '')
83
-
84
- general_variations = ["good morning", "good evening", "good afternoon", "good night", "done", "sorry", "top", "query",
85
- "stop", "sir", "sure", "oh", "wow", "aaa", "maam", "mam", "ma&#39;am","i'm all set","ask a question","apply the survey",
86
- "videos (2-8 min)","long reads (> 8 min)","short reads (3-8 min)","not a student alumni","mock","share feedback","bite size (< 2 min)",
87
- "actually no","next steps","i'm a student alumni","i have questions"]
88
- for gen_var in general_variations:
89
- pattern = r"(?<!\S)" + gen_var + r"(?!\S)|\b" + gen_var + r"\b(?=\W|$)"
90
- df['texts'] = df['texts'].str.replace(pattern, '', regex=True)
91
-
92
- def remove_punctuations(text):
93
- return re.sub(r'[^\w\s]', '', text)
94
- df['texts'] = df['texts'].apply(remove_punctuations)
95
-
96
- remove_morephrases = ["short reads 38 min","bite size 2 min","videos 28 min","long reads 8 min"]
97
-
98
- for phrase in remove_morephrases:
99
- df['texts'] = df['texts'].str.replace(phrase, '')
100
-
101
- df = df[~df['texts'].str.contains(r'\b\d{10}\b')]
102
-
103
- df['texts'] = df['texts'].str.strip()
104
-
105
- df['texts'] = df['texts'].apply(lambda x: x.strip())
106
- df = df[df['texts'] != '']
107
-
108
- return df
109
-
110
- def cluster_data(df):
111
- num_clusters = 15 # Set the number of clusters
112
  vectorizer = TfidfVectorizer(stop_words='english')
113
  X = vectorizer.fit_transform(df['texts'])
114
 
@@ -121,7 +27,7 @@ def cluster_data(df):
121
  df['PCA1'] = principal_components[:, 0]
122
  df['PCA2'] = principal_components[:, 1]
123
 
124
- return df
125
 
126
  def visualize_clusters(df):
127
  plt.figure(figsize=(10, 6))
@@ -132,6 +38,35 @@ def visualize_clusters(df):
132
  plt.ylabel('PCA Component 2')
133
  plt.show()
134
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
135
  def main(file, num_clusters_to_display):
136
  try:
137
  df = pd.read_csv(file)
@@ -140,24 +75,33 @@ def main(file, num_clusters_to_display):
140
  df = df[(df['Answer'] == 'Fallback Message shown')]
141
 
142
  df = preprocess_data(df)
143
- df = cluster_data(df)
144
  visualize_clusters(df)
145
 
146
  cluster_sizes = df['Cluster'].value_counts()
147
  sorted_clusters = cluster_sizes.index.tolist()
148
-
149
- # Exclude the largest cluster
150
- largest_cluster = sorted_clusters[0]
151
- filtered_clusters = [cluster for cluster in sorted_clusters if cluster != largest_cluster]
 
152
  top_clusters = filtered_clusters[:num_clusters_to_display]
153
 
154
  df = df[df['Cluster'].isin(top_clusters)]
155
  df['Cluster'] = pd.Categorical(df['Cluster'], categories=top_clusters, ordered=True)
156
  df = df.sort_values('Cluster')
157
 
 
 
 
158
  with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as tmpfile:
159
  df.to_csv(tmpfile.name, index=False)
160
- return tmpfile.name
 
 
 
 
 
161
  except Exception as e:
162
  return str(e)
163
 
@@ -167,7 +111,11 @@ interface = gr.Interface(
167
  gr.File(label="Upload CSV File (.csv)"),
168
  gr.Slider(label="Number of Categories to Display", minimum=1, maximum=10, step=1, value=5)
169
  ],
170
- outputs=gr.File(label="Clustered Data CSV"),
 
 
 
 
171
  title="Unanswered User Queries Clustering",
172
  description="Unanswered User Query Categorization"
173
  )
 
2
  import pandas as pd
3
  from sklearn.feature_extraction.text import TfidfVectorizer
4
  from sklearn.cluster import KMeans
5
+ from sklearn.metrics import silhouette_score, silhouette_samples
6
  import matplotlib.pyplot as plt
7
  from sklearn.decomposition import PCA
8
  import re
9
  from io import BytesIO
10
  import tempfile
11
+ import numpy as np
12
 
13
  def preprocess_data(df):
14
+ # Your preprocessing code here
15
+ pass
16
+
17
+ def cluster_data(df, num_clusters):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  vectorizer = TfidfVectorizer(stop_words='english')
19
  X = vectorizer.fit_transform(df['texts'])
20
 
 
27
  df['PCA1'] = principal_components[:, 0]
28
  df['PCA2'] = principal_components[:, 1]
29
 
30
+ return df, X, kmeans
31
 
32
  def visualize_clusters(df):
33
  plt.figure(figsize=(10, 6))
 
38
  plt.ylabel('PCA Component 2')
39
  plt.show()
40
 
41
+ def silhouette_analysis(X, labels, num_clusters):
42
+ fig, ax1 = plt.subplots(1, 1)
43
+ fig.set_size_inches(10, 6)
44
+
45
+ ax1.set_xlim([-0.1, 1])
46
+ ax1.set_ylim([0, len(X) + (num_clusters + 1) * 10])
47
+
48
+ sample_silhouette_values = silhouette_samples(X, labels)
49
+ y_lower = 10
50
+ for i in range(num_clusters):
51
+ ith_cluster_silhouette_values = sample_silhouette_values[labels == i]
52
+ ith_cluster_silhouette_values.sort()
53
+ size_cluster_i = ith_cluster_silhouette_values.shape[0]
54
+ y_upper = y_lower + size_cluster_i
55
+ color = plt.cm.nipy_spectral(float(i) / num_clusters)
56
+ ax1.fill_betweenx(np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values,
57
+ facecolor=color, edgecolor=color, alpha=0.7)
58
+ ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
59
+ y_lower = y_upper + 10
60
+
61
+ ax1.set_title("The silhouette plot for the various clusters.")
62
+ ax1.set_xlabel("The silhouette coefficient values")
63
+ ax1.set_ylabel("Cluster label")
64
+ ax1.axvline(x=np.mean(sample_silhouette_values), color="red", linestyle="--")
65
+ ax1.set_yticks([])
66
+ ax1.set_xticks([i/10.0 for i in range(-1, 11)])
67
+
68
+ return fig
69
+
70
  def main(file, num_clusters_to_display):
71
  try:
72
  df = pd.read_csv(file)
 
75
  df = df[(df['Answer'] == 'Fallback Message shown')]
76
 
77
  df = preprocess_data(df)
78
+ df, X, kmeans = cluster_data(df, num_clusters=15)
79
  visualize_clusters(df)
80
 
81
  cluster_sizes = df['Cluster'].value_counts()
82
  sorted_clusters = cluster_sizes.index.tolist()
83
+ df['Cluster'] = pd.Categorical(df['Cluster'], categories=sorted_clusters, ordered=True)
84
+ df = df.sort_values('Cluster')
85
+
86
+ # Filter out base cluster and get the largest clusters
87
+ filtered_clusters = [cluster for cluster in sorted_clusters if cluster != 10]
88
  top_clusters = filtered_clusters[:num_clusters_to_display]
89
 
90
  df = df[df['Cluster'].isin(top_clusters)]
91
  df['Cluster'] = pd.Categorical(df['Cluster'], categories=top_clusters, ordered=True)
92
  df = df.sort_values('Cluster')
93
 
94
+ silhouette_avg = silhouette_score(X, kmeans.labels_)
95
+ silhouette_plot = silhouette_analysis(X, kmeans.labels_, num_clusters=15)
96
+
97
  with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as tmpfile:
98
  df.to_csv(tmpfile.name, index=False)
99
+
100
+ return {
101
+ "Clustered Data CSV": tmpfile.name,
102
+ "Silhouette Score": silhouette_avg,
103
+ "Silhouette Plot": silhouette_plot
104
+ }
105
  except Exception as e:
106
  return str(e)
107
 
 
111
  gr.File(label="Upload CSV File (.csv)"),
112
  gr.Slider(label="Number of Categories to Display", minimum=1, maximum=10, step=1, value=5)
113
  ],
114
+ outputs=[
115
+ gr.File(label="Clustered Data CSV"),
116
+ gr.Number(label="Silhouette Score"),
117
+ gr.Plot(label="Silhouette Plot")
118
+ ],
119
  title="Unanswered User Queries Clustering",
120
  description="Unanswered User Query Categorization"
121
  )