Upload wikipedia_ubinary_ivf_faiss_50m.index #4
by aamirshakir - opened
- .gitattributes +0 -1
- README.md +3 -4
- app.py +58 -222
- requirements.txt +2 -2
- wikipedia_int8_usearch_1m.index +3 -0
- wikipedia_ubinary_faiss_1m.index +3 -0
- wikipedia_ubinary_faiss_50m.index +3 -0
- wikipedia_ubinary_ivf_faiss_50m.index +3 -0
.gitattributes
CHANGED
@@ -37,4 +37,3 @@ wikipedia_ubinary_faiss_1m.index filter=lfs diff=lfs merge=lfs -text
 wikipedia_int8_usearch_1m.index filter=lfs diff=lfs merge=lfs -text
 wikipedia_ubinary_faiss_50m.index filter=lfs diff=lfs merge=lfs -text
 wikipedia_ubinary_ivf_faiss_50m.index filter=lfs diff=lfs merge=lfs -text
-wikipedia_int8_usearch_50m.index filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,13 +1,12 @@
 ---
 title: Quantized Retrieval
-emoji:
+emoji: 🐠
 colorFrom: red
 colorTo: red
 sdk: gradio
-sdk_version:
+sdk_version: 4.22.0
 app_file: app.py
 pinned: false
-short_description: Efficient quantized retrieval over Wikipedia
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
CHANGED
@@ -1,89 +1,51 @@
 import time
-import html
 import gradio as gr
-from datasets import load_dataset
-from huggingface_hub import hf_hub_download
 import pandas as pd
 from sentence_transformers import SentenceTransformer
-from sentence_transformers.quantization import quantize_embeddings
 import faiss
-
-
-# Load titles, texts, and int8 embeddings in a lazy Dataset, allowing us to efficiently access specific rows on demand
-# Note that we never actually use the int8 embeddings for search directly; they are only used for rescoring after the binary search
-title_text_int8_dataset = load_dataset(
-    "sentence-transformers/quantized-retrieval-data", split="train"
-).select_columns(["url", "title", "text", "embedding"])
-# title_text_int8_dataset = load_from_disk("wikipedia-mxbai-embed-int8-index").select_columns(["url", "title", "text", "embedding"])
-
-TOTAL_NUM_DOCS = title_text_int8_dataset.num_rows
 
-# Load the binary indices
-binary_index_path = hf_hub_download(
-    repo_id="sentence-transformers/quantized-retrieval-data",
-    filename="wikipedia_ubinary_faiss_50m.index",
-    local_dir=".",
-    repo_type="dataset",
-)
-binary_ivf_index_path = hf_hub_download(
-    repo_id="sentence-transformers/quantized-retrieval-data",
-    filename="wikipedia_ubinary_ivf_faiss_50m.index",
-    local_dir=".",
-    repo_type="dataset",
-)
 
-binary_index = faiss.read_index_binary(binary_index_path)
-binary_ivf_index = faiss.read_index_binary(binary_ivf_index_path)
 
 # Load the SentenceTransformer model for embedding the queries
-model = SentenceTransformer(
-    "mixedbread-ai/mxbai-embed-large-v1",
-)
-
-warmup_queries = [
-    "
-
-    "What is the largest mammal?",
-    "How to bake a chocolate cake?",
-    "What is the theory of relativity?",
-]
-model.encode_query(warmup_queries)
 
 
-def search(
-    query,
-    top_k: int = 20,
-    rescore_multiplier: int = 4,
-    use_approx: bool = True,
-):
     # 1. Embed the query as float32
     start_time = time.time()
-    query_embedding = model.encode_query(query)
     embed_time = time.time() - start_time
 
     # 2. Quantize the query to ubinary
     start_time = time.time()
-    query_embedding_ubinary = quantize_embeddings(
-        query_embedding.reshape(1, -1), "ubinary"
-    )
     quantize_time = time.time() - start_time
 
-    # 3. Search the binary index
-    index = binary_ivf_index if use_approx else binary_index
     start_time = time.time()
-    _scores, binary_ids = index.search(
-        query_embedding_ubinary, top_k * rescore_multiplier
-    )
     binary_ids = binary_ids[0]
     search_time = time.time() - start_time
-    num_docs_searched = len(binary_ids)
 
     # 4. Load the corresponding int8 embeddings
    start_time = time.time()
-    int8_embeddings =
-
-    )
-    load_int8_time = time.time() - start_time
 
     # 5. Rescore the top_k * rescore_multiplier using the float32 query embedding and the int8 document embeddings
     start_time = time.time()
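The removed search function toggles between an exact `binary_index` and an approximate `binary_ivf_index` via the `use_approx` flag. For reference, a minimal sketch of how such a binary IVF index could be built and tuned in faiss; the sizes here are hypothetical, and `nprobe` trades recall for speed:

```python
import faiss
import numpy as np

d = 1024  # dimensionality in bits; binary vectors are passed as d // 8 uint8 bytes
quantizer = faiss.IndexBinaryFlat(d)
index = faiss.IndexBinaryIVF(quantizer, d, 256)  # 256 inverted lists (hypothetical)

packed = np.random.randint(0, 256, size=(10_000, d // 8), dtype=np.uint8)
index.train(packed)  # k-means clustering of the binary vectors into the lists
index.add(packed)

index.nprobe = 8  # probe more lists for higher recall, fewer for more speed
distances, ids = index.search(packed[:1], 20)  # Hamming distances + doc ids
```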
@@ -92,186 +54,60 @@ def search(
 
     # 6. Sort the scores and return the top_k
     start_time = time.time()
-    indices = (-scores).argsort()[:top_k]
-    top_k_indices = binary_ids[indices]
-    top_k_scores = scores[indices]
     sort_time = time.time() - start_time
 
-    # 7. Load the titles, urls, and texts on the fly
-    start_time = time.time()
-    rows = title_text_int8_dataset[top_k_indices.tolist()]
-    raw_top_k_titles = rows["title"]
-    top_k_urls = rows["url"]
-    top_k_texts = rows["text"]
-    load_text_time = time.time() - start_time
-
-    cards = []
-    for i in range(len(top_k_indices)):
-        title = html.escape(str(raw_top_k_titles[i]))
-        url = html.escape(str(top_k_urls[i]))
-        text = html.escape(str(top_k_texts[i]))
-        score_str = f"{top_k_scores[i]:.2f}"
-        rank_str = str(i + 1)
-        binary_rank_str = str(indices[i] + 1)
-        card_html = f"""
-        <div style="border: 1px solid var(--border-color-primary, #e0e0e0); border-radius: var(--block-radius); padding: 10px 12px; margin-bottom: 10px; background-color: var(--block-background-fill, transparent); color: inherit;">
-            <div style="display: flex; align-items: flex-start; justify-content: space-between; gap: 8px; margin-bottom: 4px;">
-                <div style="font-size: 16px; font-weight: 600; min-width: 0;">
-                    <a href="{url}" target="_blank" style="text-decoration: none; color: var(--link-text-color, #1f6feb); padding-left: 0;">{title}</a>
-                </div>
-                <div style="font-size: 12px; color: var(--body-text-color-subdued, #586069); text-align: right; white-space: nowrap;">
-                    Score: {score_str} • Rank: {rank_str} • Binary rank: {binary_rank_str}
-                </div>
-            </div>
-            <div style="font-size: 13px; line-height: 1.4; max-height: 8em; overflow: hidden;">{text}</div>
-        </div>
-        """
-        cards.append(card_html)
-
-    if cards:
-        cards_html = "\n".join(cards)
-    else:
-        cards_html = "<div>No results.</div>"
-
-    total_retrieval_time = (
-        quantize_time
-        + search_time
-        + load_int8_time
-        + rescore_time
-        + sort_time
-        + load_text_time
-    )
-    num_docs_retrieved = len(top_k_indices)
-    search_mode = "Approximate (IVF)" if use_approx else "Exact"
-
-    summary_md = f"""
-    <div style="border: 1px solid var(--border-color-primary, #e0e0e0); border-radius: var(--block-radius); padding: 10px 12px; background-color: var(--block-background-fill, transparent);">
-        <h3 style="margin-top: 0;">Search Summary</h3>
-        <ul style="margin-top: 0; margin-bottom: 8px; padding-left: 18px;">
-            <li>Total docs in corpus: {TOTAL_NUM_DOCS:,}</li>
-            <li>Docs searched: {num_docs_searched}</li>
-            <li>Docs retrieved: {num_docs_retrieved}</li>
-            <li>Search mode: {search_mode}</li>
-        </ul>
-        <h4>Timings (in seconds)</h4>
-        <ul style="margin-top: 0; margin-bottom: 0; padding-left: 18px;">
-            <li>Embed on CPU: {embed_time:.4f}</li>
-            <li>Quantize: {quantize_time:.4f}</li>
-            <li>Search: {search_time:.4f}</li>
-            <li>Load int8: {load_int8_time:.4f}</li>
-            <li>Rescore: {rescore_time:.4f}</li>
-            <li>Sort: {sort_time:.4f}</li>
-            <li>Load text: {load_text_time:.4f}</li>
-        </ul>
-        <h5>Total retrieval time: {total_retrieval_time:.4f} seconds</h5>
-    </div>"""
-
-    return cards_html, summary_md
-
-
-css = """
-.no-pad-container {
-    --block-padding: 0px;
-}
-"""
 
 with gr.Blocks(title="Quantized Retrieval") as demo:
-    gr.HTML(
-        """
-
-
-        <div style='border: 1px solid var(--border-color-primary, #e0e0e0); border-radius: var(--block-radius); padding: 12px 14px; background-color: var(--block-background-fill, transparent);'>
 
-        <
 
-        This demo showcases retrieval using<a href="https://huggingface.co/blog/embedding-quantization" style="padding-left: 0.5ch; padding-right: 0.5ch;">quantized embeddings</a>on a CPU. The corpus consists of<a href="https://huggingface.co/datasets/sentence-transformers/quantized-retrieval-data" style="padding-left: 0.5ch; padding-right: 0.5ch;">41 million texts</a>from Wikipedia articles.
-        </div>
-        """,
-        elem_classes="no-pad-container",
-    )
-    with gr.Accordion("Click to learn about the retrieval process", open=False):
-        gr.Markdown(
-            """
 Details:
 1. The query is embedded using the [`mixedbread-ai/mxbai-embed-large-v1`](https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1) SentenceTransformer model.
 2. The query is quantized to binary using the `quantize_embeddings` function from the SentenceTransformers library.
-3. A binary index (
-4. The top
-5. The top
-6. The top
-7. The titles and texts of the top 20 documents are loaded on the fly from disk and displayed.
 
-This process is designed to be memory efficient and fast, with the binary index being small enough to fit in memory and the int8 index being loaded as a view to save memory.
-In total, this process requires keeping 1) the model in memory, 2) the binary index in memory, and 3) the int8 index on disk. With a dimensionality of 1024,
 we need `1024 / 8 * num_docs` bytes for the binary index and `1024 * num_docs` bytes for the int8 index.
 
 This is notably cheaper than doing the same process with float32 embeddings, which would require `4 * 1024 * num_docs` bytes of memory/disk space for the float32 index, i.e. 32x as much memory and 4x as much disk space.
 Additionally, the binary index is much faster (up to 32x) to search than the float32 index, while the rescoring is also extremely efficient. In conclusion, this process allows for fast, scalable, cheap, and memory-efficient retrieval.
 
-    Feel free to check out the [code for this demo](https://huggingface.co/spaces/
 
-
-
-    """
-
-            query = gr.Textbox(
-                label="Query for Wikipedia articles",
-                placeholder="Enter a query to search for relevant texts from Wikipedia.",
-            )
-            search_button = gr.Button(value="Search", variant="secondary")
-        with gr.Column(scale=1):
-            top_k = gr.Slider(
-                minimum=10,
-                maximum=1000,
-                step=1,
-                value=20,
-                label="Number of documents to retrieve",
-                info="Number of documents to retrieve using binary search",
-            )
-            rescore_multiplier = gr.Slider(
-                minimum=1,
-                maximum=10,
-                step=1,
-                value=4,
-                label="Rescore multiplier",
-                info="Search for `rescore_multiplier` as many documents to rescore",
-            )
-            use_approx = gr.Radio(
-                choices=[("Approximate Search", True), ("Exact Search", False)],
-                value=True,
-                label="Search Settings",
-            )
 
     with gr.Row():
-        with gr.Column(scale=
-
         with gr.Column(scale=1):
-
-
-    examples = gr.Examples(
-        examples=[
-            "What is the coldest metal to the touch?",
-            "Who won the FIFA World Cup in 2018?",
-            "How to make a paper airplane?",
-            "Who was the first woman to cross the Pacific ocean by plane?",
-        ],
-        fn=search,
-        inputs=[query],
-        outputs=[cards, summary],
-        cache_examples=False,
-        run_on_click=True,
-    )
 
-    query.submit(
-        search,
-        inputs=[query, top_k, rescore_multiplier, use_approx],
-        outputs=[cards, summary],
-    )
-    search_button.click(
-        search,
-        inputs=[query, top_k, rescore_multiplier, use_approx],
-        outputs=[cards, summary],
-    )
 
 demo.queue()
-demo.launch(
+
 import time
 import gradio as gr
+from datasets import load_dataset
 import pandas as pd
 from sentence_transformers import SentenceTransformer
+from sentence_transformers.util import quantize_embeddings
 import faiss
+from usearch.index import Index
 
+# Load titles and texts
+title_text_dataset = load_dataset("mixedbread-ai/wikipedia-2023-11-embed-en-pre-1", split="train").select_columns(["title", "text"])
 
+# Load the int8 and binary indices. Int8 is loaded as a view to save memory, as we never actually perform search with it.
+int8_view = Index.restore("wikipedia_int8_usearch_1m.index", view=True)
+binary_index: faiss.IndexBinaryFlat = faiss.read_index_binary("wikipedia_ubinary_faiss_1m.index")
 
 # Load the SentenceTransformer model for embedding the queries
+model = SentenceTransformer(
+    "mixedbread-ai/mxbai-embed-large-v1",
+    prompts={
+        "retrieval": "Represent this sentence for searching relevant passages: ",
+    },
+    default_prompt_name="retrieval",
+)
 
 
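For intuition on the `ubinary` precision used below: `quantize_embeddings` packs each 1024-dimensional float32 embedding into 128 bytes. A minimal numpy sketch of the same transformation, assuming the zero-threshold binarization described in the embedding-quantization blog post:

```python
import numpy as np

def to_ubinary(embeddings: np.ndarray) -> np.ndarray:
    # Keep only the sign of each dimension, then pack 8 bits per byte:
    # a (n, 1024) float32 matrix becomes a (n, 128) uint8 matrix, 32x smaller.
    return np.packbits(embeddings > 0, axis=-1)
```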
+def search(query, top_k: int = 10, rescore_multiplier: int = 4):
     # 1. Embed the query as float32
     start_time = time.time()
+    query_embedding = model.encode(query)
     embed_time = time.time() - start_time
 
     # 2. Quantize the query to ubinary
     start_time = time.time()
+    query_embedding_ubinary = quantize_embeddings(query_embedding.reshape(1, -1), "ubinary")
     quantize_time = time.time() - start_time
 
+    # 3. Search the binary index
     start_time = time.time()
+    _scores, binary_ids = binary_index.search(query_embedding_ubinary, top_k * rescore_multiplier)
     binary_ids = binary_ids[0]
     search_time = time.time() - start_time
 
     # 4. Load the corresponding int8 embeddings
     start_time = time.time()
+    int8_embeddings = int8_view[binary_ids].astype(int)
+    load_time = time.time() - start_time
 
     # 5. Rescore the top_k * rescore_multiplier using the float32 query embedding and the int8 document embeddings
     start_time = time.time()
 
     # 6. Sort the scores and return the top_k
     start_time = time.time()
+    top_k_indices = (-scores).argsort()[:top_k]
+    top_k_scores = scores[top_k_indices]
+    top_k_titles, top_k_texts = zip(*[(title_text_dataset[idx]["title"], title_text_dataset[idx]["text"]) for idx in binary_ids[top_k_indices].tolist()])
+    df = pd.DataFrame({"Score": [round(value, 2) for value in top_k_scores], "Title": top_k_titles, "Text": top_k_texts})
     sort_time = time.time() - start_time
 
+    return df, {
+        "Embed Time": f"{embed_time:.4f} s",
+        "Quantize Time": f"{quantize_time:.4f} s",
+        "Search Time": f"{search_time:.4f} s",
+        "Load Time": f"{load_time:.4f} s",
+        "Rescore Time": f"{rescore_time:.4f} s",
+        "Sort Time": f"{sort_time:.4f} s",
+        "Total Retrieval Time": f"{quantize_time + search_time + load_time + rescore_time + sort_time:.4f} s"
+    }
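Note that the body of step 5 falls in the gap between the two diff hunks (new lines 52-53 are not shown), which is why `scores` and `rescore_time` appear above without a visible definition. A minimal sketch of what the rescore step presumably computes, following the dot-product rescoring from the embedding-quantization blog post (variable names chosen to match the surrounding code):

```python
import numpy as np

def rescore(query_embedding: np.ndarray, int8_embeddings: np.ndarray) -> np.ndarray:
    # Dot product between the float32 query (1024,) and each candidate int8
    # document embedding (n, 1024): higher scores mean better matches.
    return query_embedding @ int8_embeddings.T
```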
 
 with gr.Blocks(title="Quantized Retrieval") as demo:
+    gr.Markdown(
+        """
+## Quantized Retrieval - Binary Search with Scalar (int8) Rescoring
+This demo showcases retrieval using [quantized embeddings](https://huggingface.co/blog/embedding-quantization). The corpus consists of 1 million texts from Wikipedia articles.
 
+<details><summary>Click to learn about the retrieval process</summary>
 
 Details:
 1. The query is embedded using the [`mixedbread-ai/mxbai-embed-large-v1`](https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1) SentenceTransformer model.
 2. The query is quantized to binary using the `quantize_embeddings` function from the SentenceTransformers library.
+3. A binary index (1M binary embeddings; 130MB of memory/disk space) is searched using the quantized query for the top 40 documents.
+4. The top 40 documents are loaded on the fly from an int8 index on disk (1M int8 embeddings; 0 bytes of memory, 1.19GB of disk space).
+5. The top 40 documents are rescored using the float32 query and the int8 embeddings to get the top 10 documents.
+6. The top 10 documents are sorted by score and displayed.
 
+This process is designed to be memory efficient and fast, with the binary index being small enough to fit in memory and the int8 index being loaded as a view to save memory.
+In total, this process requires keeping 1) the model in memory, 2) the binary index in memory, and 3) the int8 index on disk. With a dimensionality of 1024,
 we need `1024 / 8 * num_docs` bytes for the binary index and `1024 * num_docs` bytes for the int8 index.
 
 This is notably cheaper than doing the same process with float32 embeddings, which would require `4 * 1024 * num_docs` bytes of memory/disk space for the float32 index, i.e. 32x as much memory and 4x as much disk space.
 Additionally, the binary index is much faster (up to 32x) to search than the float32 index, while the rescoring is also extremely efficient. In conclusion, this process allows for fast, scalable, cheap, and memory-efficient retrieval.
 
+Feel free to check out the [code for this demo](https://huggingface.co/spaces/tomaarsen/quantized_retrieval/blob/main/app.py) to learn more about how to apply this in practice.
 
+</details>
+""")
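As a quick check on the `1024 / 8 * num_docs` and `1024 * num_docs` formulas in the description above, here is the arithmetic for this 1M-document, 1024-dimensional corpus; the uploaded index files are somewhat larger than the raw vector payload, presumably due to index structure overhead (keys, headers):

```python
# Worked example of the cost formulas for this demo's corpus.
num_docs, dim = 1_000_000, 1024

binary_bytes = dim // 8 * num_docs   # 128,000,000 B ~= 128 MB (uploaded file: ~130 MB)
int8_bytes = dim * num_docs          # 1,024,000,000 B ~= 1.02 GB (uploaded file: ~1.19 GB)
float32_bytes = 4 * dim * num_docs   # 4,096,000,000 B ~= 4.1 GB: 32x the memory, 4x the disk
```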
+
query = gr.Textbox(label="Query for Wikipedia articles", placeholder="Enter a query to search for relevant texts from Wikipedia.")
|
| 101 |
+
search_button = gr.Button(value="Search")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 102 |
|
| 103 |
with gr.Row():
|
| 104 |
+
with gr.Column(scale=4):
|
| 105 |
+
output = gr.Dataframe(headers=["Score", "Title", "Text"])
|
| 106 |
with gr.Column(scale=1):
|
| 107 |
+
json = gr.JSON()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 108 |
|
| 109 |
+
query.submit(search, inputs=[query], outputs=[output, json])
|
| 110 |
+
search_button.click(search, inputs=[query], outputs=[output, json])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 111 |
|
| 112 |
demo.queue()
|
| 113 |
+
demo.launch(debug=True)
|
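Putting the pieces of the new app.py together, here is a self-contained sketch of the binary-search-plus-int8-rescore pattern on random data. All names and sizes are hypothetical stand-ins for the demo's actual corpus and indices, and the int8 step is a simplified placeholder for calibrated scalar quantization:

```python
import faiss
import numpy as np

rng = np.random.default_rng(0)
dim, n_docs, top_k, rescore_multiplier = 1024, 10_000, 10, 4

# Stand-ins for real document/query embeddings (hypothetical data).
doc_embeddings = rng.standard_normal((n_docs, dim)).astype(np.float32)
query_embedding = rng.standard_normal(dim).astype(np.float32)

# Binary index over sign-quantized documents (threshold at zero, pack to uint8).
doc_ubinary = np.packbits(doc_embeddings > 0, axis=-1)
index = faiss.IndexBinaryFlat(dim)
index.add(doc_ubinary)

# Simplified int8 stand-in for the on-disk usearch view used by app.py.
doc_int8 = np.clip(doc_embeddings * 127, -128, 127).astype(np.int8)

# Steps 2-3: quantize the query and search the binary index for candidates.
query_ubinary = np.packbits(query_embedding > 0).reshape(1, -1)
_scores, candidate_ids = index.search(query_ubinary, top_k * rescore_multiplier)
candidate_ids = candidate_ids[0]

# Steps 4-6: rescore candidates with the float32 query against int8 documents.
scores = query_embedding @ doc_int8[candidate_ids].astype(np.float32).T
top_ids = candidate_ids[(-scores).argsort()[:top_k]]
print(top_ids)
```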
requirements.txt
CHANGED
@@ -1,6 +1,6 @@
-sentence-transformers
+git+https://github.com/tomaarsen/sentence-transformers@feat/quantization
 datasets
 pandas
-huggingface_hub>=0.24.0
 
+usearch
 faiss-cpu
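The pinned git branch predates the quantization utilities landing in a release; `quantize_embeddings` has since shipped in released sentence-transformers (under `sentence_transformers.quantization`), so a plain version requirement should presumably work today. A minimal sketch, assuming a released version with that module:

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings

model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
embedding = model.encode("What is the largest mammal?")
# Pack the float32 embedding into unsigned-binary form, as in app.py.
ubinary = quantize_embeddings(embedding.reshape(1, -1), precision="ubinary")
```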
wikipedia_int8_usearch_1m.index
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ef74a1e23da9e45ddb9cee8943fb5e46a880684d25d195296464559f2a26f22
+size 1186568944
wikipedia_ubinary_faiss_1m.index
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:916036883f29dbe2bc29648a2314d08f4b8b50a026f9cdcc23f4dfdc05d1bef3
+size 129524001
wikipedia_ubinary_faiss_50m.index
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1434f64ab27f552e8ebae95ac29ec645d9128f82ca266856070097a7329fa817
+size 5180954273
wikipedia_ubinary_ivf_faiss_50m.index
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ebb876de1095caabd4ee44b51ef90c3234c4d036267e56890b26bd6f7cbfa53a
+size 5504768347