yifeis02 commited on
Commit
c2e6d10
·
verified ·
1 Parent(s): 20be251

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +454 -0
app.py ADDED
@@ -0,0 +1,454 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Team members: Yifei Shen, Katherine Tang
2
+ # My own contribution:
3
+
4
+
5
+ ## Mini Project 1 - Part 1: Getting Familiar with Word Embeddings.
6
+ # This assignment introduces students to text similarity measures using cosine similarity and sentence embeddings.
7
+ # Students will implement and compare different methods for computing and analyzing text similarity using GloVe and Sentence Transformers.
8
+
9
+ #Learning Objectives
10
+ #By the end of this assignment, students will:
11
+ #Understand how cosine similarity is used to measure text similarity.
12
+ #Learn to encode sentences using GloVe embeddings and Sentence Transformers.
13
+ #Compare the performance of different embedding techniques.
14
+ #Create a Web interface for your model
15
+
16
+ # Context: In this part, you are going to play around with some commonly used pretrained text embeddings for text search. For example, GloVe is an unsupervised learning algorithm for obtaining vector representations for words. Pretrained on
17
+ # 2 billion tweets with vocabulary size of 1.2 million. Download from [Stanford NLP](http://nlp.stanford.edu/data/glove.twitter.27B.zip).
18
+ # Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. *GloVe: Global Vectors for Word Representation*.
19
+
20
+ ### Import necessary libraries: here you will use streamlit library to run a text search demo, please make sure to install it.
21
+ import streamlit as st
22
+ import numpy as np
23
+ import numpy.linalg as la
24
+ import pickle
25
+ import os
26
+ import gdown
27
+ from sentence_transformers import SentenceTransformer
28
+ import matplotlib.pyplot as plt
29
+ import math
30
+
31
+
32
+
33
+ ### Some predefined utility functions for you to load the text embeddings
34
+
35
+ # Function to Load Glove Embeddings
36
def load_glove_embeddings(glove_path="Data/embeddings.pkl"):
    """Load a pickled GloVe embeddings dictionary from disk.

    The pickle is decoded with encoding="latin1" because the embedding
    file was serialized with a Python-2-compatible protocol.

    Parameters:
        glove_path: path to the pickle file containing the embeddings dict.

    Returns:
        The unpickled embeddings dictionary.
    """
    with open(glove_path, "rb") as handle:
        return pickle.load(handle, encoding="latin1")
41
+
42
+
43
def get_model_id_gdrive(model_type):
    """Return the Google Drive file ids for a pretrained GloVe variant.

    Parameters:
        model_type: one of "25d", "50d" or "100d".

    Returns:
        (word_index_id, embeddings_id) tuple of Google Drive file ids.

    Raises:
        ValueError: if model_type is not a supported variant.  (The
            original if/elif chain fell through and raised an opaque
            UnboundLocalError on the return line instead.)
    """
    # (word_index_id, embeddings_id) per supported embedding size.
    drive_ids = {
        "25d": ("13qMXs3-oB9C6kfSRMwbAtzda9xuAUtt8", "1-RXcfBvWyE-Av3ZHLcyJVsps0RYRRr_2"),
        "50d": ("1rB4ksHyHZ9skes-fJHMa2Z8J1Qa7awQ9", "1DBaVpJsitQ1qxtUvV1Kz7ThDc3az16kZ"),
        "100d": ("1-oWV0LqG3fmrozRZ7WB1jzeTJHRUI3mq", "1SRHfX130_6Znz7zbdfqboKosz-PfNvNp"),
    }
    try:
        return drive_ids[model_type]
    except KeyError:
        raise ValueError(
            f"Unsupported model_type {model_type!r}; expected one of {sorted(drive_ids)}"
        ) from None
55
+
56
+
57
def download_glove_embeddings_gdrive(model_type):
    """Download the word-index pickle and embeddings .npy from Google Drive.

    Files are written to the working directory as
    "word_index_dict_<model_type>_temp.pkl" and
    "embeddings_<model_type>_temp.npy", the names that
    load_glove_embeddings_gdrive() later expects.

    Parameters:
        model_type: embedding size tag, e.g. "25d" / "50d" / "100d".
    """
    # Resolve the Google Drive file ids for the requested variant.
    word_index_id, embeddings_id = get_model_id_gdrive(model_type)

    # Target filenames (shared convention with the loader function).
    embeddings_temp = "embeddings_" + str(model_type) + "_temp.npy"
    word_index_temp = "word_index_dict_" + str(model_type) + "_temp.pkl"

    # Download word_index pickle file
    print("Downloading word index dictionary....\n")
    gdown.download(id=word_index_id, output=word_index_temp, quiet=False)

    # Download embeddings numpy file
    # (fixed typo in the progress message: "Donwloading embedings")
    print("Downloading embeddings...\n\n")
    gdown.download(id=embeddings_id, output=embeddings_temp, quiet=False)
72
+
73
+
74
+ # @st.cache_data()
75
def load_glove_embeddings_gdrive(model_type):
    """Load the word-index dict and embedding matrix downloaded from Drive.

    Expects the files produced by download_glove_embeddings_gdrive() to be
    present in the working directory.

    Parameters:
        model_type: embedding size tag, e.g. "25d" / "50d" / "100d".

    Returns:
        (word_index_dict, embeddings) where word_index_dict maps a word to
        its row index in the embeddings numpy array.
    """
    word_index_temp = "word_index_dict_" + str(model_type) + "_temp.pkl"
    embeddings_temp = "embeddings_" + str(model_type) + "_temp.npy"

    # Bug fix: the original used pickle.load(open(...)), leaking the file
    # handle; a context manager guarantees it is closed.
    with open(word_index_temp, "rb") as handle:
        word_index_dict = pickle.load(handle, encoding="latin")

    # Load embeddings numpy array.
    embeddings = np.load(embeddings_temp)

    return word_index_dict, embeddings
86
+
87
+
88
@st.cache_resource()
def load_sentence_transformer_model(model_name):
    """Instantiate a SentenceTransformer, cached across reruns by Streamlit."""
    return SentenceTransformer(model_name)
92
+
93
+
94
def get_sentence_transformer_embeddings(sentence, model_name="all-MiniLM-L6-v2"):
    """Encode a sentence with a Sentence Transformer model.

    Default model: https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2
    which produces 384-dimensional embeddings.

    Parameters:
        sentence: input text to embed.
        model_name: Sentence Transformer model identifier.

    Returns:
        The embedding vector; on encoding failure, a zero vector of the
        model's expected dimensionality so downstream cosine-similarity
        code keeps working (best-effort behavior preserved).
    """
    sentenceTransformer = load_sentence_transformer_model(model_name)

    try:
        return sentenceTransformer.encode(sentence)
    except Exception:
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; Exception is the widest net we
        # actually want for a best-effort fallback.
        # NOTE(review): 512 is assumed for non-default models — confirm
        # against the models actually used by callers.
        dim = 384 if model_name == "all-MiniLM-L6-v2" else 512
        return np.zeros(dim)
110
+
111
+
112
def get_glove_embeddings(word, word_index_dict, embeddings, model_type):
    """Look up the GloVe vector for a single word (case-insensitive).

    Parameters:
        word: the word to embed; lower-cased before lookup.
        word_index_dict: maps lower-cased word -> row index in embeddings.
        embeddings: numpy array of word vectors.
        model_type: size tag such as "50d"; its leading integer determines
            the zero-vector length returned for out-of-vocabulary words.

    Returns:
        The word's embedding row, or a zero vector if the word is unknown.
    """
    key = word.lower()
    if key not in word_index_dict:
        # Out-of-vocabulary: zero vector sized from the model tag ("50d" -> 50).
        return np.zeros(int(model_type.split("d")[0]))
    return embeddings[word_index_dict[key]]
120
+
121
+
122
def get_category_embeddings(embeddings_metadata):
    """Compute and cache sentence-transformer embeddings for each category.

    Categories come from the space-separated st.session_state.categories
    string; results are cached in st.session_state under the key
    "cat_embed_<model_name>" so each category is only encoded once.

    Parameters:
        embeddings_metadata: dict with at least "model_name" (empty string
            selects the default Sentence Transformer model).
    """
    model_name = embeddings_metadata["model_name"]
    cache_key = "cat_embed_" + model_name
    # Bug fix: the original unconditionally reset the cache dict on every
    # call, so the "already computed?" check below could never hit and every
    # category was re-encoded each time.  Initialize only when absent.
    if cache_key not in st.session_state:
        st.session_state[cache_key] = {}
    cache = st.session_state[cache_key]
    for category in st.session_state.categories.split(" "):
        if category not in cache:
            if model_name:
                cache[category] = get_sentence_transformer_embeddings(category, model_name=model_name)
            else:
                # Empty model_name -> use the encoder's default model.
                cache[category] = get_sentence_transformer_embeddings(category)
137
+
138
+
139
def update_category_embeddings(embeddings_metadata):
    """Refresh the per-category embedding cache.

    Thin wrapper that delegates to get_category_embeddings().
    """
    get_category_embeddings(embeddings_metadata)
144
+
145
+
146
+
147
+
148
+ ### Plotting utility functions
149
+
150
def plot_piechart(sorted_cosine_scores_items):
    """Render a pie chart of category similarity scores via Streamlit.

    Parameters:
        sorted_cosine_scores_items: iterable of (category_index, score)
            pairs; category_index refers to the position in the
            space-separated st.session_state.categories string.
    """
    scores = np.array([score for _, score in sorted_cosine_scores_items])
    categories = st.session_state.categories.split(" ")
    labels = [categories[cat_index] for cat_index, _ in sorted_cosine_scores_items]
    fig, ax = plt.subplots()
    ax.pie(scores, labels=labels, autopct="%1.1f%%")
    st.pyplot(fig)
164
+
165
+
166
def plot_piechart_helper(sorted_cosine_scores_items):
    """Build (but do not display) a small pie-chart figure of scores.

    The top-ranked slice is exploded the most; when three or more
    categories are present, lower-ranked slices get smaller offsets so
    the chart stays readable.

    Parameters:
        sorted_cosine_scores_items: iterable of (category_index, score)
            pairs, sorted by descending score.

    Returns:
        The matplotlib Figure, for the caller to place (e.g. in a tab).
    """
    scores = np.array([score for _, score in sorted_cosine_scores_items])
    categories = st.session_state.categories.split(" ")
    labels = [categories[cat_index] for cat_index, _ in sorted_cosine_scores_items]

    fig, ax = plt.subplots(figsize=(3, 3))
    explode = np.zeros(len(labels))
    explode[0] = 0.2  # emphasize the best match
    if len(labels) == 3:
        explode[1] = 0.1
    elif len(labels) > 3:
        explode[2] = 0.05
    ax.pie(
        scores,
        labels=labels,
        autopct="%1.1f%%",
        explode=explode,
    )

    return fig
193
+
194
+
195
def plot_piecharts(sorted_cosine_scores_models):
    """Plot one pie chart per model, stacked vertically, via Streamlit.

    Parameters:
        sorted_cosine_scores_models: dict mapping a model label to its
            list of (category_index, score) pairs.

    Bug fix: the original only created the figure when exactly two models
    were passed, yet unconditionally called st.pyplot(fig), raising
    UnboundLocalError for any other count.  This version handles any
    number of models and is a no-op for an empty dict.
    """
    if not sorted_cosine_scores_models:
        return

    categories = st.session_state.categories.split(" ")
    scores_list = list(sorted_cosine_scores_models.values())

    fig, axes = plt.subplots(len(scores_list))
    if len(scores_list) == 1:
        axes = [axes]  # plt.subplots(1) returns a bare Axes, not an array

    for ax, scores in zip(axes, scores_list):
        labels = [categories[cat_index] for cat_index, _ in scores]
        values = np.array([score for _, score in scores])
        ax.pie(values, labels=labels, autopct="%1.1f%%")

    st.pyplot(fig)
224
+
225
+
226
def plot_alatirchart(sorted_cosine_scores_models):
    """Show one Streamlit tab per model, each containing its pie chart.

    Parameters:
        sorted_cosine_scores_models: dict mapping a model label to its
            list of (category_index, score) pairs.
    """
    model_names = list(sorted_cosine_scores_models.keys())
    tabs = st.tabs(model_names)
    for tab, model_name in zip(tabs, model_names):
        with tab:
            st.pyplot(plot_piechart_helper(sorted_cosine_scores_models[model_name]))
236
+
237
+
238
+ ### Your Part To Complete: Follow the instructions in each function below to complete the similarity calculation between text embeddings
239
+
240
+ # Task I: Compute Cosine Similarity
241
def cosine_similarity(x, y):
    """Exponentiated cosine similarity between two vectors.

    Computes dot(x, y) / (|x| * |y| + 1e-6) — the small epsilon guards
    against division by zero for all-zero vectors — then returns
    exp(cos_sim) so that scores are strictly positive.

    Parameters:
        x, y: array-like numeric vectors of equal length.

    Returns:
        float: exp of the (epsilon-stabilized) cosine similarity.
    """
    denominator = la.norm(x) * la.norm(y) + 1e-6
    return math.exp(np.dot(x, y) / denominator)
256
+
257
+ # Task II: Average Glove Embedding Calculation
258
def averaged_glove_embeddings_gdrive(sentence, word_index_dict, embeddings, model_type="50d"):
    """Embed a sentence as the average of its words' GloVe vectors.

    Parameters:
        sentence: input text; split on whitespace.
        word_index_dict: maps lower-cased word -> row index in embeddings.
        embeddings: numpy array of word vectors.
        model_type: size tag such as "50d".  Bug fix: the old default was
            the int 50, which crashed on model_type.split("d"); it is now
            the usable string "50d" (all in-file callers pass it explicitly,
            so behavior is unchanged for them).

    Returns:
        numpy array: the mean of the word vectors.  Out-of-vocabulary
        words contribute zero vectors but still count toward the divisor
        (get_glove_embeddings never returns None, so the original
        `is not None` guard was dead code).  An empty sentence yields an
        all-zero vector.
    """
    dim = int(model_type.split("d")[0])
    embedding = np.zeros(dim)
    words = sentence.split()
    for word in words:
        embedding += get_glove_embeddings(word, word_index_dict, embeddings, model_type)
    if words:
        embedding /= len(words)
    return embedding
282
+
283
+ # Task III: Sort the cosine similarity
284
def get_sorted_cosine_similarity(_, embeddings_metadata):
    """
    Get sorted cosine similarity between input sentence and categories
    Steps:
    1. Get embeddings for input sentence
    2. Get embeddings for categories (if not found, update category embeddings)
    3. Compute cosine similarity between input sentence and categories
    4. Sort cosine similarity
    5. Return sorted cosine similarity
    (50 pts)

    NOTE(review): the first positional argument is ignored; the input
    sentence is actually read from st.session_state.text_search.

    Returns a list of (category_index, similarity) tuples sorted by
    similarity in descending order, where category_index is the position
    in the space-separated st.session_state.categories string.
    """
    categories = st.session_state.categories.split(" ")
    cosine_sim = {}  # category index -> exponentiated cosine similarity
    if embeddings_metadata["embedding_model"] == "glove":
        word_index_dict = embeddings_metadata["word_index_dict"]
        embeddings = embeddings_metadata["embeddings"]
        model_type = embeddings_metadata["model_type"]

        # Sentence embedding = average of per-word GloVe vectors.
        input_embedding = averaged_glove_embeddings_gdrive(st.session_state.text_search,
                                                           word_index_dict,
                                                           embeddings, model_type)

        # Each category is embedded the same way (averaged GloVe vectors)
        # and scored against the input sentence.
        for index, category in enumerate(categories):
            category_embedding = averaged_glove_embeddings_gdrive(
                category, word_index_dict, embeddings, model_type
            )
            sim_val = cosine_similarity(input_embedding, category_embedding)
            cosine_sim[index] = sim_val

    else:
        # Sentence-transformer path: per-category embeddings are cached in
        # st.session_state under "cat_embed_<model_name>".
        model_name = embeddings_metadata["model_name"]
        if not "cat_embed_" + model_name in st.session_state:
            # Cache miss for this model: build the category cache first.
            get_category_embeddings(embeddings_metadata)

        category_embeddings = st.session_state["cat_embed_" + model_name]

        print("text_search = ", st.session_state.text_search)
        # An empty model_name falls back to the encoder's default model.
        if model_name:
            input_embedding = get_sentence_transformer_embeddings(st.session_state.text_search, model_name=model_name)
        else:
            input_embedding = get_sentence_transformer_embeddings(st.session_state.text_search)
        for index in range(len(categories)):
            category = categories[index]
            # Lazily embed any category missing from the cache (e.g. the
            # user edited the category list after the cache was built).
            if category not in category_embeddings:
                if model_name:
                    category_embeddings[category] = get_sentence_transformer_embeddings(category, model_name=model_name)
                else:
                    category_embeddings[category] = get_sentence_transformer_embeddings(category)
            sim_val = cosine_similarity(input_embedding, category_embeddings[category])
            cosine_sim[index] = sim_val

    # Highest similarity first.
    sorted_cosine_sim = sorted(cosine_sim.items(), key=lambda x: x[1], reverse=True)
    return sorted_cosine_sim
345
+
346
+
347
+ ### Below is the main function, creating the app demo for text search engine using the text embeddings.
348
+
349
if __name__ == "__main__":
    ### Text Search ###
    ### There will be Bonus marks of 10% for the teams that submit a URL for your deployed web app.
    ### Bonus: You can also submit a publicly accessible link to the deployed web app.

    # Sidebar: background on the GloVe Twitter embeddings plus a selector
    # for the embedding dimensionality (25d / 50d / 100d; index=1 -> "50d").
    st.sidebar.title("GloVe Twitter")
    st.sidebar.markdown(
        """
    GloVe is an unsupervised learning algorithm for obtaining vector representations for words. Pretrained on
    2 billion tweets with vocabulary size of 1.2 million. Download from [Stanford NLP](http://nlp.stanford.edu/data/glove.twitter.27B.zip).

    Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. *GloVe: Global Vectors for Word Representation*.
    """
    )

    model_type = st.sidebar.selectbox("Choose the model", ("25d", "50d", "100d"), index=1)


    st.title("Search Based Retrieval Demo")
    st.subheader(
        "Pass in space separated categories you want this search demo to be about."
    )
    # Space-separated category list, stored by Streamlit in
    # st.session_state["categories"] via the key= argument.
    st.text_input(
        label="Categories", key="categories", value="Flowers Colors Cars Weather Food"
    )
    print(st.session_state["categories"])
    print(type(st.session_state["categories"]))

    st.subheader("Pass in an input word or even a sentence")
    # Query sentence, stored in st.session_state["text_search"].
    text_search = st.text_input(
        label="Input your sentence",
        key="text_search",
        value="Roses are red, trucks are blue, and Seattle is grey right now",
    )

    # Download glove embeddings if it doesn't exist
    embeddings_path = "embeddings_" + str(model_type) + "_temp.npy"
    word_index_dict_path = "word_index_dict_" + str(model_type) + "_temp.pkl"
    if not os.path.isfile(embeddings_path) or not os.path.isfile(word_index_dict_path):
        print("Model type = ", model_type)
        glove_path = "Data/glove_" + str(model_type) + ".pkl"
        print("glove_path = ", glove_path)

        # Download embeddings from google drive
        with st.spinner("Downloading glove embeddings..."):
            download_glove_embeddings_gdrive(model_type)


    # Load glove embeddings
    word_index_dict, embeddings = load_glove_embeddings_gdrive(model_type)


    # Find closest word to an input word
    if st.session_state.text_search:
        # Rank categories with averaged GloVe embeddings.
        print("Glove Embedding")
        embeddings_metadata = {
            "embedding_model": "glove",
            "word_index_dict": word_index_dict,
            "embeddings": embeddings,
            "model_type": model_type,
        }
        with st.spinner("Obtaining Cosine similarity for Glove..."):
            sorted_cosine_sim_glove = get_sorted_cosine_similarity(
                st.session_state.text_search, embeddings_metadata
            )

        # Rank categories with Sentence Transformer embeddings (empty
        # model_name selects the default 384d model).
        print("Sentence Transformer Embedding")
        embeddings_metadata = {"embedding_model": "transformers", "model_name": ""}
        with st.spinner("Obtaining Cosine similarity for 384d sentence transformer..."):
            sorted_cosine_sim_transformer = get_sorted_cosine_similarity(
                st.session_state.text_search, embeddings_metadata
            )

        # Results and Plot Pie Chart for Glove
        print("Categories are: ", st.session_state.categories)
        st.subheader(
            "Closest word I have between: "
            + st.session_state.categories
            + " as per different Embeddings"
        )

        print(sorted_cosine_sim_glove)
        print(sorted_cosine_sim_transformer)
        # One tab per model, each holding its pie chart.
        plot_alatirchart(
            {
                "glove_" + str(model_type): sorted_cosine_sim_glove,
                "sentence_transformer_384": sorted_cosine_sim_transformer,
            }
        )

    st.write("")
    st.write(
        "Demo developed by [Yifei Shen](https://www.linkedin.com/in/yifei-shen-7b7973270/)"
    )