Update app.py
app.py
CHANGED
@@ -43,7 +43,6 @@ st.sidebar.success("Load Successfully!")
 if not torch.cuda.is_available():
     print("Warning: No GPU found. Please add GPU to your notebook")
 
-
 #We use the Bi-Encoder to encode all passages, so that we can use it with semantic search
 bi_encoder = SentenceTransformer(option1)
 bi_encoder.max_seq_length = 256     #Truncate long passages to 256 tokens
@@ -52,38 +51,13 @@ top_k = 32 #Number of passages we want to retrieve with
 #The bi-encoder will retrieve 100 documents. We use a cross-encoder, to re-rank the results list to improve the quality
 cross_encoder = CrossEncoder(option2)
 
-#
-
-
-
-
-
-
-
-passages = []
-'''
-with gzip.open(wikipedia_filepath, 'rt', encoding='utf8') as fIn:
-    for line in fIn:
-        data = json.loads(line.strip())
-
-        #Add all paragraphs
-        #passages.extend(data['paragraphs'])
-
-        #Only add the first paragraph
-        passages.append(data['paragraphs'][0])
-'''
-
-with open(etsy_filepath, 'r') as EtsyJson:
-    for line in EtsyJson:
-        data = json.loads(line.strip())
-        #passages.append(data['query'])
-        passages.append(data['title'])
-
-
-print("Passages:", len(passages))
-
-# We encode all passages into our vector space. This takes about 5 minutes (depends on your GPU speed)
-corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True, show_progress_bar=True)
+# load pre-trained embeddings file
+embedding_cache_path = 'etsy-embeddings.pkl'
+print("Load pre-computed embeddings from disk")
+with open(embedding_cache_path, "rb") as fIn:
+    cache_data = pickle.load(fIn)
+    corpus_sentences = cache_data['sentences']
+    corpus_embeddings = cache_data['embeddings']
 
 # This function will search all wikipedia articles for passages that
 # answer the query
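
For reference, the removed block shows how the corpus used to be built and encoded on every app startup. A cache such as etsy-embeddings.pkl is presumably produced once, offline, by a script along these lines; this is a sketch, not part of the commit: the model name and file paths are stand-ins for option1 and etsy_filepath, and only the 'sentences'/'embeddings' keys are taken from the code the diff adds.

import json
import pickle

from sentence_transformers import SentenceTransformer

# Stand-in for option1; any SentenceTransformer bi-encoder would do
bi_encoder = SentenceTransformer('multi-qa-MiniLM-L6-cos-v1')
bi_encoder.max_seq_length = 256  # Truncate long passages to 256 tokens, as in app.py

# Build the passage list the way the removed code did: one title per JSON line
passages = []
with open('etsy.json', 'r') as EtsyJson:  # stand-in for etsy_filepath
    for line in EtsyJson:
        data = json.loads(line.strip())
        passages.append(data['title'])

# Encode once, then persist under the keys app.py expects to load
corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True, show_progress_bar=True)
with open('etsy-embeddings.pkl', 'wb') as fOut:
    pickle.dump({'sentences': passages, 'embeddings': corpus_embeddings}, fOut)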
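The trailing comments point at the standard retrieve-and-re-rank recipe from sentence-transformers: the bi-encoder retrieves top_k candidates via semantic search over corpus_embeddings, then the cross-encoder re-scores each (query, passage) pair to improve ranking quality. The search function itself sits outside this diff; a minimal sketch of what it presumably looks like, reusing bi_encoder, cross_encoder, corpus_sentences, corpus_embeddings, and top_k from app.py (the function name and return shape are assumptions):

from sentence_transformers import util

def search(query, top_k=32):
    # 1) Bi-encoder retrieval: embed the query and find the nearest passages
    question_embedding = bi_encoder.encode(query, convert_to_tensor=True)
    hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k)[0]

    # 2) Cross-encoder re-ranking: score each (query, passage) pair jointly
    cross_inp = [[query, corpus_sentences[hit['corpus_id']]] for hit in hits]
    cross_scores = cross_encoder.predict(cross_inp)
    for hit, score in zip(hits, cross_scores):
        hit['cross-score'] = score

    # Return passages sorted by the cross-encoder score, best first
    hits = sorted(hits, key=lambda h: h['cross-score'], reverse=True)
    return [(hit['cross-score'], corpus_sentences[hit['corpus_id']]) for hit in hits]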