tanish78 committed on
Commit
b3eb597
·
verified ·
1 Parent(s): 079161b

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +181 -0
app.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import pandas as pd
3
+ from sklearn.feature_extraction.text import TfidfVectorizer
4
+ from sklearn.cluster import KMeans
5
+ import matplotlib.pyplot as plt
6
+ from sklearn.decomposition import PCA
7
+ import re
8
+
9
def preprocess_data(df):
    """Clean a raw chat-export frame down to a single 'texts' column of user queries.

    Expects the export columns 'Timestamp', 'sender', 'User id', 'User Name',
    'User Email', 'User Phone No', 'Platform', 'Message Type' and 'Message'.
    Keeps only user-authored TEXT/POSTBACK rows, lowercases the text, strips
    URLs/emoji/punctuation, canonicalises synonyms, and drops spam, greetings,
    bare acknowledgements and phone-number rows.

    Returns a DataFrame whose only column is 'texts' (non-empty strings).
    """
    # Keep user-authored rows of the message types worth clustering.
    # na=False: a missing sender must filter out, not raise on a NaN mask.
    df = df[df['sender'].str.contains('user', case=False, na=False)]
    df = df[df['Message Type'].isin(['TEXT', 'POSTBACK'])]
    # Work on a copy so the chained assignments below never touch the
    # caller's frame (avoids SettingWithCopyWarning).
    df = df.copy()

    # Metadata is irrelevant to clustering; keep only the message text.
    columns_to_drop = ["Timestamp", "sender", "User id", "User Name",
                       "User Email", "User Phone No", "Platform", "Message Type"]
    df = df.drop(columns=columns_to_drop)

    # Normalise: lowercase, rename to 'texts', force string dtype.
    df['Message'] = df['Message'].str.lower()
    df = df.rename(columns={'Message': 'texts'})
    df['texts'] = df['texts'].astype(str)

    # Remove URLs.
    df['texts'] = df['texts'].str.replace(r'https?://\S+|www\.\S+', '', regex=True)

    # Remove emoji code points (every value is str after astype above).
    emoji_pattern = re.compile("["
                               u"\U0001F600-\U0001F64F"  # emoticons
                               u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                               u"\U0001F680-\U0001F6FF"  # transport & map symbols
                               u"\U0001F1E0-\U0001F1FF"  # flags
                               u"\U00002702-\U000027B0"
                               u"\U000024C2-\U0001F251"
                               "]+", flags=re.UNICODE)
    df['texts'] = df['texts'].str.replace(emoji_pattern, '', regex=True)

    # Map synonym variants onto one canonical word so they cluster together.
    custom_synonyms = {
        'application': ['form'],
        'apply': ['fill', 'applied'],
        'work': ['job'],
        'salary': ['stipend', 'pay', 'payment', 'paid'],
        'test': ['online test', 'amcat test', 'exam', 'assessment'],
        'pass': ['clear', 'selected', 'pass or not'],
        'result': ['outcome', 'mark', 'marks'],
        'thanks': ["thanks a lot to you", "thankyou so much", "thank you so much", "tysm", "thank you",
                   "okaythank", "thx", "ty", "thankyou", "thank", "thank u"],
        'interview': ["pi"]
    }
    for original_word, synonym_list in custom_synonyms.items():
        for synonym in synonym_list:
            escaped = re.escape(synonym)  # literal match even if a synonym ever gains regex chars
            # Whole word, not followed by '(' (leaves parenthesised labels alone).
            pattern = r"\b" + escaped + r"\b(?!\s*\()"
            df['texts'] = df['texts'].str.replace(pattern, original_word, regex=True)
            # Whole word followed by "you" (e.g. "thank you" forms).
            pattern = r"\b" + escaped + r"\s+you" + r"\b(?!\s*\()"
            df['texts'] = df['texts'].str.replace(pattern, original_word + ' ', regex=True)

    # Drop whole rows that contain a known spam word/phrase.
    spam_list = ["click here", "free", "recharge", "limited", "discount", "money back guarantee", "aaj", "kal", "mein",
                 "how can i help you", "how can we help you", "how we can help you", "follow", "king", "contacting", "gar",
                 "kirke", "subscribe", "youtube", "jio", "insta", "make money", "b2b", "sent using truecaller"]
    rows_to_remove = set()
    for spam_phrase in spam_list:
        pattern = r"\b" + re.escape(spam_phrase) + r"\b"
        spam_rows = df['texts'].str.contains(pattern, regex=True, na=False)
        rows_to_remove.update(df.index[spam_rows].tolist())
    df = df.drop(index=list(rows_to_remove))

    # Blank out greetings (emptied rows are dropped at the end).
    greet_variations = ["hello", "hy", "hey", "hii", "hi", "heyyy", "bie", "bye"]
    for greet_var in greet_variations:
        pattern = r"(?<!\S)" + greet_var + r"(?!\S)|\b" + greet_var + r"\b"
        df['texts'] = df['texts'].str.replace(pattern, '', regex=True)

    # Blank out bare acknowledgements.
    okay_variations = ["ok", "k", "kay", "okay", "okie", "kk", "ohhhk", "t", "r"]
    for okay_var in okay_variations:
        pattern = r"(?<!\S)" + okay_var + r"(?!\S)|\b" + okay_var + r"\b"
        df['texts'] = df['texts'].str.replace(pattern, '', regex=True)

    # Blank out yes/no responses.
    yes_variations = ["yes", "yeah", "yep", "yup", "yuh", "ya", "yes got it", "yeah it is", "yesss", "yea", "no"]
    for yes_var in yes_variations:
        pattern = r"(?<!\S)" + yes_var + r"(?!\S)|\b" + yes_var + r"\b"
        df['texts'] = df['texts'].str.replace(pattern, '', regex=True)

    # Strip bot-menu labels verbatim. regex=False is explicit: these phrases
    # contain '(' / ')' which older pandas (default regex=True) would have
    # parsed as regex groups and silently failed to match.
    remove_phrases = ["i'm all set", "ask a question", "apply the survey", "videos (2-8 min)", "long reads (> 8 min)",
                      "short reads (3-8 min)", "not a student alumni", "mock", "share feedback", "bite size (< 2 min)",
                      "actually no", "next steps", "i'm a student alumni", "i have questions"]
    for phrase in remove_phrases:
        df['texts'] = df['texts'].str.replace(phrase, '', regex=False)

    # Blank out filler words, word-bounded. NOTE(review): entries with '('
    # are intentionally left unescaped to preserve the original regex
    # behaviour; the literal labels were already removed above.
    general_variations = ["good morning", "good evening", "good afternoon", "good night", "done", "sorry", "top", "query",
                          "stop", "sir", "sure", "oh", "wow", "aaa", "maam", "mam", "ma&#39;am", "i'm all set", "ask a question", "apply the survey",
                          "videos (2-8 min)", "long reads (> 8 min)", "short reads (3-8 min)", "not a student alumni", "mock", "share feedback", "bite size (< 2 min)",
                          "actually no", "next steps", "i'm a student alumni", "i have questions"]
    for gen_var in general_variations:
        pattern = r"(?<!\S)" + gen_var + r"(?!\S)|\b" + gen_var + r"\b(?=\W|$)"
        df['texts'] = df['texts'].str.replace(pattern, '', regex=True)

    # Strip punctuation, then the de-punctuated menu labels it reveals.
    df['texts'] = df['texts'].str.replace(r'[^\w\s]', '', regex=True)

    remove_morephrases = ["short reads 38 min", "bite size 2 min", "videos 28 min", "long reads 8 min"]
    for phrase in remove_morephrases:
        df['texts'] = df['texts'].str.replace(phrase, '', regex=False)

    # Drop rows containing a 10-digit phone number.
    df = df[~df['texts'].str.contains(r'\b\d{10}\b', regex=True, na=False)]

    # Trim whitespace once (the original stripped twice) and drop empty rows.
    df['texts'] = df['texts'].str.strip()
    df = df[df['texts'] != '']

    return df
135
+
136
def cluster_data(df, num_clusters=5):
    """Cluster the 'texts' column with TF-IDF + K-Means.

    Adds three columns to *df* in place and returns it:
    'Cluster' (K-Means label) and 'PCA1'/'PCA2' (2-D projection for plotting).
    """
    # gr.Number delivers a float; KMeans requires an integer n_clusters.
    # Clamp to [1, len(df)] so small uploads don't crash the fit.
    num_clusters = max(1, min(int(num_clusters), len(df)))

    # Vectorize the text data.
    vectorizer = TfidfVectorizer(stop_words='english')
    X = vectorizer.fit_transform(df['texts'])

    # n_init pinned explicitly: its default changed to 'auto' in scikit-learn 1.4.
    kmeans = KMeans(n_clusters=num_clusters, random_state=0, n_init=10)
    kmeans.fit(X)
    df['Cluster'] = kmeans.labels_

    # PCA needs a dense array; project to 2-D for visualization only.
    pca = PCA(n_components=2)
    principal_components = pca.fit_transform(X.toarray())
    df['PCA1'] = principal_components[:, 0]
    df['PCA2'] = principal_components[:, 1]

    return df
153
+
154
def visualize_clusters(df):
    """Scatter-plot the PCA projection coloured by cluster label.

    Returns the Matplotlib figure so callers can embed or close it; the
    original leaked one unreferenced figure per call (plt.show() is a no-op
    in a headless server, so figures accumulated).
    """
    fig, ax = plt.subplots(figsize=(10, 6))
    scatter = ax.scatter(df['PCA1'], df['PCA2'], c=df['Cluster'], cmap='viridis')
    ax.legend(*scatter.legend_elements(), title="Clusters")
    ax.set_title('Clusters of User Queries')
    ax.set_xlabel('PCA Component 1')
    ax.set_ylabel('PCA Component 2')
    plt.show()  # kept for interactive use; harmless when headless
    return fig
162
+
163
def main(file, num_clusters):
    """Gradio callback: read an Excel upload, preprocess, cluster, plot.

    Returns the clustered DataFrame for display in the output component.
    """
    # Gradio may pass a tempfile wrapper (with .name) or, in Gradio 4.x,
    # a plain filepath string — accept both.
    path = file.name if hasattr(file, 'name') else file
    df = pd.read_excel(path)
    df = preprocess_data(df)
    # gr.Number yields a float; KMeans needs an int cluster count.
    df = cluster_data(df, int(num_clusters))
    visualize_clusters(df)
    return df
169
+
170
# Gradio UI. The gr.inputs / gr.outputs namespaces (and the type='file' /
# default= kwargs) were removed in Gradio 4.x; use top-level components.
interface = gr.Interface(
    fn=main,
    inputs=[
        gr.File(label="Upload Excel File (.xlsx)"),
        gr.Number(value=5, label="Number of Clusters"),
    ],
    outputs=gr.Dataframe(label="Clustered Data"),
    title="Unanswered User Queries Clustering",
    description="Upload an Excel file (.xlsx) containing user queries, and the algorithm will automatically cluster the queries into different categories."
)

# Guard the launch so the module can be imported without starting a server.
if __name__ == "__main__":
    interface.launch()