import streamlit as st
from streamlit_tags import st_tags, st_tags_sidebar
from keytotext import pipeline
from PIL import Image
import json

from sentence_transformers import SentenceTransformer, CrossEncoder, util
import gzip
import os
import torch
import pickle
import random
import numpy as np
############
## Main page
############

st.write("# Demonstration of Etsy Query Expansion (Etsy-QE)")
st.markdown("***The idea is to build a model that takes a query as input and generates expansion terms as output.***")

image = Image.open('etsy-shop-LLC.png')
st.image(image)

st.sidebar.write("# Top-N Selection")
maxtags_sidebar = st.sidebar.slider('Number of expansion terms to show?', 1, 20, 1, key='ehikwegrjifbwreuk')
# user_query = st_tags(
#     label='# Enter Query:',
#     text='Press enter to add more',
#     value=['Mother'],
#     suggestions=['gift', 'nike', 'wool'],
#     maxtags=maxtags_sidebar,
#     key="aljnf")

user_query = st.text_input("Enter a query for the generated text: e.g., gift, home decoration ...")
# Add model-selection boxes to the sidebar
option1 = st.sidebar.selectbox(
    'Which bi-encoder model would you like to use?',
    ('multi-qa-MiniLM-L6-cos-v1', 'null', 'null'))

option2 = st.sidebar.selectbox(
    'Which cross-encoder model would you like to use?',
    ('cross-encoder/ms-marco-MiniLM-L-6-v2', 'null', 'null'))

st.sidebar.success("Models loaded successfully!")
# if not torch.cuda.is_available():
#     print("Warning: No GPU found. Please add a GPU to your notebook")

# The bi-encoder encodes the query so it can be compared against the
# pre-computed passage embeddings with semantic search.
bi_encoder = SentenceTransformer(option1, device='cpu')
bi_encoder.max_seq_length = 256     # Truncate long passages to 256 tokens
top_k = 32                          # Number of passages to retrieve with the bi-encoder

# The bi-encoder retrieves the top_k candidate passages; a cross-encoder then
# re-ranks that candidate list to improve the quality of the results.
cross_encoder = CrossEncoder(option2, device='cpu')
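# A minimal sketch of how the two models combine (kept commented out, not part of
# the app flow). "wool blanket" is a made-up example query and `example_passages`
# is a hypothetical list of strings, not something defined in this app:
#   emb = bi_encoder.encode(example_passages, convert_to_tensor=True)
#   q_emb = bi_encoder.encode("wool blanket", convert_to_tensor=True)
#   candidates = util.semantic_search(q_emb, emb, top_k=top_k)[0]   # [{'corpus_id': ..., 'score': ...}, ...]
#   scores = cross_encoder.predict([["wool blanket", example_passages[c['corpus_id']]] for c in candidates])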
passages = []

# Load the pre-computed passage embeddings from disk
embedding_cache_path = 'etsy-embeddings-cpu.pkl'
print("Loading pre-computed embeddings from disk")
with open(embedding_cache_path, "rb") as fIn:
    cache_data = pickle.load(fIn)
    passages = cache_data['sentences']
    corpus_embeddings = cache_data['embeddings']
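# A minimal sketch of how such a cache could be produced (an assumption; the
# pickle shipped with this Space may have been built differently):
#   corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True)
#   with open(embedding_cache_path, "wb") as fOut:
#       pickle.dump({'sentences': passages, 'embeddings': corpus_embeddings}, fOut)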
from rank_bm25 import BM25Okapi
from sklearn.feature_extraction import _stop_words
import string
from tqdm.autonotebook import tqdm
import re
import yake

# YAKE keyword-extractor configuration
language = "en"
max_ngram_size = 3
deduplication_threshold = 0.9
deduplication_algo = 'seqm'
windowSize = 3
numOfKeywords = 3
custom_kw_extractor = yake.KeywordExtractor(lan=language, n=max_ngram_size, dedupLim=deduplication_threshold,
                                            dedupFunc=deduplication_algo, windowsSize=windowSize,
                                            top=numOfKeywords, features=None)
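# For reference: extract_keywords() returns (keyword, score) tuples, where a lower
# score means a more relevant keyword. An illustrative, made-up example:
#   custom_kw_extractor.extract_keywords("handmade wool blanket for the living room")
#   -> [('handmade wool blanket', 0.02...), ...]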
# Lower-case the text and remove stop-words before indexing
def bm25_tokenizer(text):
    tokenized_doc = []
    for token in text.lower().split():
        token = token.strip(string.punctuation)
        if len(token) > 0 and token not in _stop_words.ENGLISH_STOP_WORDS:
            tokenized_doc.append(token)
    return tokenized_doc

# Build the BM25 index over the tokenized passages
tokenized_corpus = []
for passage in tqdm(passages):
    tokenized_corpus.append(bm25_tokenizer(passage))

bm25 = BM25Okapi(tokenized_corpus)
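# Usage note (a sketch, not executed here): get_scores() returns one score per
# passage in the corpus, so the top lexical matches can be picked with argpartition.
# "wool blanket" is a made-up example query:
#   scores = bm25.get_scores(bm25_tokenizer("wool blanket"))
#   top_idx = np.argpartition(scores, -5)[-5:]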
def word_len(s):
    # Number of non-empty, space-separated tokens in s
    return len([i for i in s.split(' ') if i])
# Search the passage corpus for expansions of the query: BM25 (lexical search),
# bi-encoder retrieval (semantic search), and cross-encoder re-ranking.
def search(query):
    print("Input query:", query)
    total_qe = []

    ##### BM25 search (lexical search) #####
    bm25_scores = bm25.get_scores(bm25_tokenizer(query))
    top_n = np.argpartition(bm25_scores, -5)[-5:]
    bm25_hits = [{'corpus_id': idx, 'score': bm25_scores[idx]} for idx in top_n]
    bm25_hits = sorted(bm25_hits, key=lambda x: x['score'], reverse=True)

    qe_string = []
    for hit in bm25_hits[0:1000]:
        passage_text = passages[hit['corpus_id']].replace("\n", " ")
        if passage_text not in qe_string:
            qe_string.append(passage_text)

    sub_string = []
    for item in qe_string:
        for sub_item in item.split(","):
            sub_string.append(sub_item)
    total_qe.append(sub_string)

    ##### Semantic search #####
    # Encode the query with the bi-encoder and retrieve potentially relevant passages
    query_embedding = bi_encoder.encode(query, convert_to_tensor=True)
    hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=top_k)
    hits = hits[0]  # Get the hits for the first (and only) query

    ##### Re-ranking #####
    # Score all retrieved passages with the cross-encoder
    cross_inp = [[query, passages[hit['corpus_id']]] for hit in hits]
    cross_scores = cross_encoder.predict(cross_inp)
    for idx in range(len(cross_scores)):
        hits[idx]['cross-score'] = cross_scores[idx]

    # Top hits from the bi-encoder
    hits = sorted(hits, key=lambda x: x['score'], reverse=True)
    qe_string = []
    for hit in hits[0:1000]:
        passage_text = passages[hit['corpus_id']].replace("\n", " ")
        if passage_text not in qe_string:
            qe_string.append(passage_text)
    total_qe.append(qe_string)

    # Top hits from the cross-encoder re-ranker
    hits = sorted(hits, key=lambda x: x['cross-score'], reverse=True)
    qe_string = []
    for hit in hits[0:1000]:
        passage_text = passages[hit['corpus_id']].replace("\n", " ")
        if passage_text not in qe_string:
            qe_string.append(passage_text)
    total_qe.append(qe_string)

    # Combine and clean all results
    st.write("E-Commerce Query Expansion Results: \n")
    res = []
    for sub_list in total_qe:
        for i in sub_list:
            # Replace characters outside the ASCII 0-9 / A-z ranges with spaces,
            # then turn double spaces into line breaks
            rs = re.sub(r"[^\u0030-\u0039\u0041-\u007a]", ' ', i)
            rs_final = re.sub(r"\x20\x20", "\n", rs)
            res.append(rs_final.strip())

    # For long strings, keep only the YAKE keywords; keep short strings as-is
    res_clean = []
    for out in res:
        if len(out) > 20:
            keywords = custom_kw_extractor.extract_keywords(out)
            for key in keywords:
                res_clean.append(key[0])
        else:
            res_clean.append(out)

    # Keep only multi-word expansions
    show_out = []
    for i in res_clean:
        if word_len(i) > 1:
            show_out.append(i)

    # De-duplicate, drop the original query, and lower-case the expansions
    unique_list = list(set(show_out))
    new_unique_list = [item for item in unique_list if item != query]
    lowercased_list = [item.lower() for item in new_unique_list]
    st.write(lowercased_list[0:maxtags_sidebar])
    return lowercased_list
# Same retrieval pipeline as search(), but without writing results to the page;
# used by reranking() below.
def search_nolog(query):
    total_qe = []

    ##### BM25 search (lexical search) #####
    bm25_scores = bm25.get_scores(bm25_tokenizer(query))
    top_n = np.argpartition(bm25_scores, -5)[-5:]
    bm25_hits = [{'corpus_id': idx, 'score': bm25_scores[idx]} for idx in top_n]
    bm25_hits = sorted(bm25_hits, key=lambda x: x['score'], reverse=True)

    qe_string = []
    for hit in bm25_hits[0:1000]:
        passage_text = passages[hit['corpus_id']].replace("\n", " ")
        if passage_text not in qe_string:
            qe_string.append(passage_text)

    sub_string = []
    for item in qe_string:
        for sub_item in item.split(","):
            sub_string.append(sub_item)
    total_qe.append(sub_string)

    ##### Semantic search #####
    # Encode the query with the bi-encoder and retrieve potentially relevant passages
    query_embedding = bi_encoder.encode(query, convert_to_tensor=True)
    hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=top_k)
    hits = hits[0]  # Get the hits for the first (and only) query

    ##### Re-ranking #####
    # Score all retrieved passages with the cross-encoder
    cross_inp = [[query, passages[hit['corpus_id']]] for hit in hits]
    cross_scores = cross_encoder.predict(cross_inp)
    for idx in range(len(cross_scores)):
        hits[idx]['cross-score'] = cross_scores[idx]

    # Top hits from the bi-encoder
    hits = sorted(hits, key=lambda x: x['score'], reverse=True)
    qe_string = []
    for hit in hits[0:1000]:
        passage_text = passages[hit['corpus_id']].replace("\n", " ")
        if passage_text not in qe_string:
            qe_string.append(passage_text)
    total_qe.append(qe_string)

    # Top hits from the cross-encoder re-ranker
    hits = sorted(hits, key=lambda x: x['cross-score'], reverse=True)
    qe_string = []
    for hit in hits[0:1000]:
        passage_text = passages[hit['corpus_id']].replace("\n", " ")
        if passage_text not in qe_string:
            qe_string.append(passage_text)
    total_qe.append(qe_string)

    # Clean the combined results
    res = []
    for sub_list in total_qe:
        for i in sub_list:
            rs = re.sub(r"[^\u0030-\u0039\u0041-\u007a]", ' ', i)
            rs_final = re.sub(r"\x20\x20", "\n", rs)
            res.append(rs_final.strip())

    res_clean = []
    for out in res:
        if len(out) > 20:
            keywords = custom_kw_extractor.extract_keywords(out)
            for key in keywords:
                res_clean.append(key[0])
        else:
            res_clean.append(out)

    show_out = []
    for i in res_clean:
        if word_len(i) > 1:
            show_out.append(i)
    return show_out
def reranking():
    reres = []
    remove_dup = []

    # Run the full pipeline, then de-duplicate, lower-case, and drop the original query
    rerank_list = search_nolog(query=user_query)
    unique_list = list(set(rerank_list))
    lowercased_list = [item.lower() for item in unique_list]
    new_unique_list = [item for item in lowercased_list if item != user_query]
    for i in new_unique_list:
        clean_string = i.strip()
        if clean_string not in remove_dup:
            remove_dup.append(clean_string)

    st.write("E-Commerce Query Expansion Results: \n")
    st.write(remove_dup[0:maxtags_sidebar])

    # "Re-ranking" here is a seeded random shuffle of the top-N expansions
    for i in remove_dup[0:maxtags_sidebar]:
        reres.append(i)
    np.random.seed(7)
    np.random.shuffle(reres)
    st.write("Reranking Results: \n")
    st.write(reres)
    return reres
| st.write("## Results:") | |
| if st.button('Generated Expansion'): | |
| out_res = search(query = user_query) | |
| #st.success(out_res) | |
| if st.button('Rerank'): | |
| out_res = reranking() | |
| #st.success(out_res) |