yuanjunchai commited on
Commit
808bbf8
·
1 Parent(s): c2120a8

add application files

Browse files
Files changed (2) hide show
  1. app.py +463 -0
  2. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,463 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ## Mini Project 1 - Part 1: Getting Familiar with Word Embeddings.
3
+ # This assignment introduces students to text similarity measures using cosine similarity and sentence embeddings.
4
+ # Students will implement and compare different methods for computing and analyzing text similarity using GloVe and Sentence Transformers.
5
+
6
+ #Learning Objectives
7
+ #By the end of this assignment, students will:
8
+ #Understand how cosine similarity is used to measure text similarity.
9
+ #Learn to encode sentences using GloVe embeddings and Sentence Transformers.
10
+ #Compare the performance of different embedding techniques.
11
+ #Create a Web interface for your model
12
+
13
+ # Context: In this part, you are going to play around with some commonly used pretrained text embeddings for text search. For example, GloVe is an unsupervised learning algorithm for obtaining vector representations for words. Pretrained on
14
+ # 2 billion tweets with vocabulary size of 1.2 million. Download from [Stanford NLP](http://nlp.stanford.edu/data/glove.twitter.27B.zip).
15
+ # Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. *GloVe: Global Vectors for Word Representation*.
16
+
17
+ ### Import necessary libraries: here you will use streamlit library to run a text search demo, please make sure to install it.
18
+ import streamlit as st
19
+ import numpy as np
20
+ import numpy.linalg as la
21
+ import pickle
22
+ import os
23
+ import gdown
24
+ from sentence_transformers import SentenceTransformer
25
+ import matplotlib.pyplot as plt
26
+ import math
27
+
28
+
29
+
30
+ ### Some predefined utility functions for you to load the text embeddings
31
+
32
+ # Function to Load Glove Embeddings
33
def load_glove_embeddings(glove_path="Data/embeddings.pkl"):
    """Load a pickled {word: vector} GloVe embedding dictionary from disk.

    Args:
        glove_path: path to the pickle file (defaults to the bundled Data file).

    Returns:
        The unpickled embeddings dictionary.
    """
    # latin1 decoding keeps Python-2-era pickles loadable under Python 3.
    with open(glove_path, "rb") as handle:
        return pickle.load(handle, encoding="latin1")
38
+
39
+
40
def get_model_id_gdrive(model_type):
    """Return the Google Drive file ids for a pretrained GloVe variant.

    Args:
        model_type: one of "25d", "50d", "100d".

    Returns:
        Tuple (word_index_id, embeddings_id) of Google Drive file ids.

    Raises:
        ValueError: for an unknown model_type. (The original if/elif chain
        fell through and raised UnboundLocalError on the return instead.)
    """
    drive_ids = {
        # model_type: (word_index_id, embeddings_id)
        "25d": ("13qMXs3-oB9C6kfSRMwbAtzda9xuAUtt8", "1-RXcfBvWyE-Av3ZHLcyJVsps0RYRRr_2"),
        "50d": ("1rB4ksHyHZ9skes-fJHMa2Z8J1Qa7awQ9", "1DBaVpJsitQ1qxtUvV1Kz7ThDc3az16kZ"),
        "100d": ("1-oWV0LqG3fmrozRZ7WB1jzeTJHRUI3mq", "1SRHfX130_6Znz7zbdfqboKosz-PfNvNp"),
    }
    try:
        return drive_ids[model_type]
    except KeyError:
        raise ValueError(f"Unknown GloVe model type: {model_type!r}") from None
52
+
53
+
54
def download_glove_embeddings_gdrive(model_type):
    """Download the word-index pickle and embeddings .npy for a GloVe variant.

    Fetches both files from Google Drive via gdown into the current working
    directory as "word_index_dict_<model_type>_temp.pkl" and
    "embeddings_<model_type>_temp.npy", matching the names that
    load_glove_embeddings_gdrive later reads.

    Args:
        model_type: GloVe variant string ("25d"/"50d"/"100d").
    """
    word_index_id, embeddings_id = get_model_id_gdrive(model_type)

    embeddings_temp = "embeddings_" + str(model_type) + "_temp.npy"
    word_index_temp = "word_index_dict_" + str(model_type) + "_temp.pkl"

    # Download word_index pickle file.
    print("Downloading word index dictionary....\n")
    gdown.download(id=word_index_id, output=word_index_temp, quiet=False)

    # Download embeddings numpy file.
    # Fixed typos in the progress message ("Donwloading embedings").
    print("Downloading embeddings...\n\n")
    gdown.download(id=embeddings_id, output=embeddings_temp, quiet=False)
69
+
70
+
71
# @st.cache_data()
def load_glove_embeddings_gdrive(model_type):
    """Load the downloaded word-index dict and embeddings matrix from disk.

    Args:
        model_type: GloVe variant string ("25d"/"50d"/"100d") used to build
            the temp file names written by download_glove_embeddings_gdrive.

    Returns:
        Tuple (word_index_dict, embeddings) where embeddings is a numpy array.
    """
    word_index_temp = "word_index_dict_" + str(model_type) + "_temp.pkl"
    embeddings_temp = "embeddings_" + str(model_type) + "_temp.npy"

    # Bug fix: the original leaked the file handle via
    # pickle.load(open(...), ...); use a context manager so it is closed.
    with open(word_index_temp, "rb") as f:
        word_index_dict = pickle.load(f, encoding="latin")

    embeddings = np.load(embeddings_temp)

    return word_index_dict, embeddings
83
+
84
+
85
@st.cache_resource()
def load_sentence_transformer_model(model_name):
    """Load (and cache across Streamlit reruns) a SentenceTransformer model."""
    # st.cache_resource keeps one shared model instance per model_name.
    return SentenceTransformer(model_name)
89
+
90
+
91
def get_sentence_transformer_embeddings(sentence, model_name="all-MiniLM-L6-v2"):
    """Return the sentence-transformer embedding for `sentence`.

    The default model produces a 384-dimensional vector
    (https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2).

    Args:
        sentence: text to encode (a single string).
        model_name: sentence-transformers model identifier.

    Returns:
        The encoded vector, or a zero vector of the expected dimensionality
        when encoding fails, so downstream similarity code keeps working.
    """
    sentence_transformer = load_sentence_transformer_model(model_name)

    try:
        return sentence_transformer.encode(sentence)
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; catch Exception instead.
        # NOTE(review): assumes every non-default model is 512-d — confirm
        # for the models actually used by the app.
        if model_name == "all-MiniLM-L6-v2":
            return np.zeros(384)
        else:
            return np.zeros(512)
107
+
108
+
109
def get_glove_embeddings(word, word_index_dict, embeddings, model_type):
    """Return the GloVe vector for a single word.

    Args:
        word: token to look up (lower-cased before the lookup).
        word_index_dict: {word: row index} into `embeddings`.
        embeddings: 2-D array of word vectors.
        model_type: string like "50d"; its numeric prefix gives the
            dimensionality of the zero vector returned for OOV words.

    Returns:
        The word's embedding row, or a zero vector when out of vocabulary.
    """
    key = word.lower()
    try:
        return embeddings[word_index_dict[key]]
    except KeyError:
        return np.zeros(int(model_type.split("d")[0]))
117
+
118
+
119
def get_category_embeddings(embeddings_metadata):
    """Compute and cache one sentence-transformer embedding per category word.

    Splits st.session_state.categories on spaces and stores the embeddings in
    st.session_state["cat_embed_" + model_name], resetting that cache first.
    """
    model_name = embeddings_metadata["model_name"]
    cache_key = "cat_embed_" + model_name
    st.session_state[cache_key] = {}
    cache = st.session_state[cache_key]

    for word in st.session_state.categories.split(" "):
        # Repeated category words are only embedded once.
        if word in cache:
            continue
        if model_name:
            cache[word] = get_sentence_transformer_embeddings(word, model_name=model_name)
        else:
            # Empty model name: fall back to the default transformer model.
            cache[word] = get_sentence_transformer_embeddings(word)
134
+
135
+
136
def update_category_embeddings(embeddings_metadata):
    """Recompute the cached per-category embeddings from scratch."""
    # Delegates to get_category_embeddings, which resets the cache first.
    get_category_embeddings(embeddings_metadata)
141
+
142
+
143
+
144
+
145
+ ### Plotting utility functions
146
+
147
def plot_piechart(sorted_cosine_scores_items):
    """Render a pie chart of (category_index, score) pairs via st.pyplot.

    Labels come from st.session_state.categories, indexed by each item's
    category index; slice sizes are the scores.
    """
    scores = np.array([score for _, score in sorted_cosine_scores_items])
    all_categories = st.session_state.categories.split(" ")
    labels = [all_categories[cat_idx] for cat_idx, _ in sorted_cosine_scores_items]

    fig, ax = plt.subplots()
    ax.pie(scores, labels=labels, autopct="%1.1f%%")
    st.pyplot(fig)
161
+
162
+
163
def plot_piechart_helper(sorted_cosine_scores_items):
    """Build and return a small pie-chart Figure for (index, score) pairs.

    The top-scoring slice is always exploded; with exactly three slices the
    runner-up is nudged out too, and with more than three slices the third
    one also gets a small offset.
    """
    scores = np.array([score for _, score in sorted_cosine_scores_items])
    all_categories = st.session_state.categories.split(" ")
    labels = [all_categories[cat_idx] for cat_idx, _ in sorted_cosine_scores_items]

    explode = np.zeros(len(labels))
    explode[0] = 0.2
    if len(labels) == 3:
        explode[1] = 0.1
    elif len(labels) > 3:
        explode[2] = 0.05

    fig, ax = plt.subplots(figsize=(3, 3))
    ax.pie(
        scores,
        labels=labels,
        autopct="%1.1f%%",
        explode=explode,
    )
    return fig
190
+
191
+
192
def plot_piecharts(sorted_cosine_scores_models):
    """Render stacked pie charts when exactly two models' scores are given.

    Each value in sorted_cosine_scores_models is a list of
    (category_index, score) pairs; anything other than two models is a no-op.
    """
    all_categories = st.session_state.categories.split(" ")
    per_model_scores = list(sorted_cosine_scores_models.values())

    if len(sorted_cosine_scores_models) == 2:
        fig, axes = plt.subplots(2)
        for ax, items in zip(axes, per_model_scores):
            labels = [all_categories[cat_idx] for cat_idx, _ in items]
            values = np.array([score for _, score in items])
            ax.pie(values, labels=labels, autopct="%1.1f%%")
        st.pyplot(fig)
221
+
222
+
223
def plot_alatirchart(sorted_cosine_scores_models):
    """Show one pie chart per model, each inside its own Streamlit tab.

    Keys of sorted_cosine_scores_models become the tab labels; values are the
    (category_index, score) lists handed to plot_piechart_helper.
    """
    model_names = list(sorted_cosine_scores_models)
    tabs = st.tabs(model_names)
    for tab, name in zip(tabs, model_names):
        with tab:
            st.pyplot(plot_piechart_helper(sorted_cosine_scores_models[name]))
233
+
234
+
235
+ ### Your Part To Complete: Follow the instructions in each function below to complete the similarity calculation between text embeddings
236
+
237
+ # Task I: Compute Cosine Similarity
238
def cosine_similarity(x, y):
    """Exponentiated cosine similarity between vectors x and y.

    Computes cos(x, y) = <x, y> / (|x| |y|), defined as 0 when either vector
    has zero norm, then returns exp(cos(x, y)) so scores are strictly
    positive (useful for the pie charts downstream).
    """
    # Norms are non-negative, so the product is zero iff either norm is zero.
    denom = la.norm(x) * la.norm(y)
    similarity = 0 if denom == 0 else np.dot(x, y) / denom
    return math.exp(similarity)
263
+
264
+
265
+
266
+ # Task II: Average Glove Embedding Calculation
267
def averaged_glove_embeddings_gdrive(sentence, word_index_dict, embeddings, model_type="50d"):
    """Average the GloVe vectors of all in-vocabulary words in `sentence`.

    Args:
        sentence: input text; split on whitespace, each word lower-cased.
        word_index_dict: {word: row index} into `embeddings`.
        embeddings: 2-D array of word vectors.
        model_type: string like "50d" whose numeric prefix is the embedding
            dimensionality. (Default fixed from the int 50, which crashed on
            .split().)

    Returns:
        The mean of the found word vectors, or a zero vector when no word of
        the sentence is in the vocabulary.
    """
    # Bug fix: the original computed a zero *vector* and then passed it as
    # the shape argument to np.zeros (np.zeros(np.zeros(dim))), which raises
    # a TypeError. The shape must be the integer dimensionality.
    embedding_dim = int(model_type.split("d")[0])
    embedding = np.zeros(embedding_dim)

    matched = 0
    for word in sentence.split():
        index = word_index_dict.get(word.lower())
        if index is not None:
            embedding += embeddings[index]
            matched += 1

    if matched > 0:
        embedding /= matched

    return embedding
295
+
296
+
297
+ # Task III: Sort the cosine similarity
298
def get_sorted_cosine_similarity(*args):
    """Rank the app's categories by similarity to the current search text.

    The input sentence is read from st.session_state.text_search and the
    categories from st.session_state.categories. The last positional argument
    must be the embeddings_metadata dict; for "glove" it carries
    word_index_dict/embeddings/model_type, otherwise model_name for a
    sentence-transformer.

    Accepts either (embeddings_metadata) or (text_search, embeddings_metadata):
    the original single-parameter signature made the app's two-argument call
    in __main__ raise TypeError, so both call shapes are now supported.

    Returns:
        List of (category_index, score) pairs sorted by descending score.
    """
    embeddings_metadata = args[-1]

    categories = st.session_state.categories.split(" ")
    cosine_sim = {}

    if embeddings_metadata["embedding_model"] == "glove":
        word_index_dict = embeddings_metadata["word_index_dict"]
        embeddings = embeddings_metadata["embeddings"]
        model_type = embeddings_metadata["model_type"]

        input_embedding = averaged_glove_embeddings_gdrive(
            st.session_state.text_search, word_index_dict, embeddings, model_type
        )
        for idx, category in enumerate(categories):
            category_embedding = averaged_glove_embeddings_gdrive(
                category, word_index_dict, embeddings, model_type
            )
            cosine_sim[idx] = cosine_similarity(input_embedding, category_embedding)
    else:
        model_name = embeddings_metadata["model_name"]
        cache_key = "cat_embed_" + model_name
        if cache_key not in st.session_state:
            get_category_embeddings(embeddings_metadata)
        category_embeddings = st.session_state[cache_key]

        if model_name:
            input_embedding = get_sentence_transformer_embeddings(
                st.session_state.text_search, model_name=model_name
            )
        else:
            input_embedding = get_sentence_transformer_embeddings(st.session_state.text_search)

        for idx, category in enumerate(categories):
            # Embeddings are numpy arrays, so test for presence explicitly
            # rather than relying on truthiness.
            category_embedding = category_embeddings.get(category)
            if category_embedding is None:
                # Bug fix: the original always passed model_name= here, even
                # when it was empty, which would try to load a model named "".
                if model_name:
                    category_embedding = get_sentence_transformer_embeddings(
                        category, model_name=model_name
                    )
                else:
                    category_embedding = get_sentence_transformer_embeddings(category)
                category_embeddings[category] = category_embedding
            cosine_sim[idx] = cosine_similarity(input_embedding, category_embedding)

    return sorted(cosine_sim.items(), key=lambda item: item[1], reverse=True)
352
+
353
+
354
+ ### Below is the main function, creating the app demo for text search engine using the text embeddings.
355
+
356
if __name__ == "__main__":
    ### Text Search ###
    # Streamlit demo: ranks user-supplied categories against an input
    # sentence using averaged GloVe embeddings vs. sentence transformers.

    st.sidebar.title("GloVe Twitter")
    st.sidebar.markdown(
        """
    GloVe is an unsupervised learning algorithm for obtaining vector representations for words. Pretrained on
    2 billion tweets with vocabulary size of 1.2 million. Download from [Stanford NLP](http://nlp.stanford.edu/data/glove.twitter.27B.zip).

    Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. *GloVe: Global Vectors for Word Representation*.
    """
    )

    model_type = st.sidebar.selectbox("Choose the model", ("25d", "50d", "100d"), index=1)

    st.title("Search Based Retrieval Demo")
    st.subheader(
        "Pass in space separated categories you want this search demo to be about."
    )
    # Bug fix: the original pre-seeded st.session_state["categories"] AND
    # created a widget with the same key plus a default value, which
    # Streamlit rejects; the widget's value= alone supplies the default.
    st.text_input(
        label="Categories", key="categories", value="Flowers Colors Cars Weather Food"
    )

    st.subheader("Pass in an input word or even a sentence")
    text_search = st.text_input(
        label="Input your sentence",
        key="text_search",
        value="Roses are red, trucks are blue, and Seattle is grey right now",
    )

    # Download glove embeddings if they don't already exist locally.
    embeddings_path = "embeddings_" + str(model_type) + "_temp.npy"
    word_index_dict_path = "word_index_dict_" + str(model_type) + "_temp.pkl"
    if not os.path.isfile(embeddings_path) or not os.path.isfile(word_index_dict_path):
        with st.spinner("Downloading glove embeddings..."):
            download_glove_embeddings_gdrive(model_type)

    # Load glove embeddings.
    word_index_dict, embeddings = load_glove_embeddings_gdrive(model_type)

    # Rank the categories against the input text.
    if st.session_state.text_search:
        # GloVe similarity scores.
        embeddings_metadata = {
            "embedding_model": "glove",
            "word_index_dict": word_index_dict,
            "embeddings": embeddings,
            "model_type": model_type,
        }
        with st.spinner("Obtaining Cosine similarity for Glove..."):
            # Bug fix: get_sorted_cosine_similarity takes the metadata dict;
            # the input text is read from st.session_state inside it. The
            # original passed the text as an extra positional argument.
            sorted_cosine_sim_glove = get_sorted_cosine_similarity(embeddings_metadata)

        # Sentence-transformer similarity scores (default 384-d model).
        embeddings_metadata = {"embedding_model": "transformers", "model_name": ""}
        with st.spinner("Obtaining Cosine similarity for 384d sentence transformer..."):
            sorted_cosine_sim_transformer = get_sorted_cosine_similarity(embeddings_metadata)

        # Results: one pie chart per embedding model, in tabs.
        st.subheader(
            "Closest word I have between: "
            + st.session_state.categories
            + " as per different Embeddings"
        )

        plot_alatirchart(
            {
                "glove_" + str(model_type): sorted_cosine_sim_glove,
                "sentence_transformer_384": sorted_cosine_sim_transformer,
            }
        )

        st.write("")
        st.write(
            "Demo developed by [Your Name](https://www.linkedin.com/in/your_id/ - Optional)"
        )
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ sentence-transformers==2.2.2
2
+ numpy==1.24.4
3
+ huggingface_hub==0.25.0
4
+ gdown==4.6.0
5
+ matplotlib==3.7.3
6
+ pandas==2.0.0
7
+ scikit-learn==1.2.2