Datasets: Update wiki_dpr.py

Changed files: wiki_dpr.py (+37 -46)

wiki_dpr.py CHANGED
@@ -1,6 +1,7 @@
 import os
 
-import numpy as np
+import pyarrow as pa
+import pyarrow.parquet as pq
 
 import datasets
 
@@ -37,7 +38,14 @@ _NQ_VECTORS_URL = "https://dl.fbaipublicfiles.com/dpr/data/wiki_encoded/single/n
 
 _MULTISET_VECTORS_URL = "https://dl.fbaipublicfiles.com/rag/rag_multiset_embeddings/wiki_passages_{i}"
 
-_DATA_URL = "https://dl.fbaipublicfiles.com/dpr/wikipedia_split/psgs_w100.tsv.gz"
+_DATA_TO_NUM_SHARDS = {
+    "nq": 157,
+    "multiset": 157,
+    "no_embeddings": 28,
+    "dummy.nq": 1,
+    "dummy.multiset": 1,
+    "dummy.no_embeddings": 1,
+}
 
 
 class WikiDprConfig(datasets.BuilderConfig):
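Note: the new _DATA_TO_NUM_SHARDS table drives which parquet shards _split_generators requests. For illustration only (not part of the script), here is a standalone sketch of the relative paths that the "nq" embeddings with the "psgs_w100" wiki split resolve to, mirroring the list comprehension further down in this diff:

    import os

    # Hypothetical standalone sketch; values taken from _DATA_TO_NUM_SHARDS["nq"].
    num_shards = 157
    data_dir = os.path.join("data", "psgs_w100", "nq")
    files = [os.path.join(data_dir, f"train-{i:05d}-of-{num_shards:05d}.parquet") for i in range(num_shards)]
    print(files[0])   # data/psgs_w100/nq/train-00000-of-00157.parquet
    print(files[-1])  # data/psgs_w100/nq/train-00156-of-00157.parquet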
@@ -95,7 +103,7 @@ class WikiDprConfig(datasets.BuilderConfig):
             self.index_file = "dummy." + self.index_file
 
 
-class WikiDpr(datasets.GeneratorBasedBuilder):
+class WikiDpr(datasets.ArrowBasedBuilder):
     BUILDER_CONFIG_CLASS = WikiDprConfig
     BUILDER_CONFIGS = [
         WikiDprConfig(
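Note: switching the base class from GeneratorBasedBuilder to ArrowBasedBuilder changes the generation contract: _generate_examples yields one (key, dict) example at a time and each row is encoded individually, while _generate_tables yields (key, pyarrow.Table) batches that are written out as-is. A minimal sketch of the two contracts, using toy builders that are not part of this script:

    import pyarrow as pa
    import datasets

    class ToyRowBuilder(datasets.GeneratorBasedBuilder):
        def _generate_examples(self, files):
            # one Python dict per example
            for i in range(3):
                yield i, {"id": str(i), "text": "..."}

    class ToyTableBuilder(datasets.ArrowBasedBuilder):
        def _generate_tables(self, files):
            # one Arrow table per batch of examples
            yield 0, pa.Table.from_pydict({"id": ["0", "1", "2"], "text": ["...", "...", "..."]})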
@@ -130,48 +138,24 @@ class WikiDpr(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        files_to_download = {"data_file": _DATA_URL}
-        downloaded_files = dl_manager.download_and_extract(files_to_download)
-        if self.config.with_embeddings:
-            vectors_url = _NQ_VECTORS_URL if self.config.embeddings_name == "nq" else _MULTISET_VECTORS_URL
-            if self.config.dummy:
-                downloaded_files["vectors_files"] = dl_manager.download([vectors_url.format(i=0)])
-            else:
-                downloaded_files["vectors_files"] = dl_manager.download([vectors_url.format(i=i) for i in range(50)])
+        data_dir = self.config.embeddings_name if self.config.with_embeddings else "no_embeddings"
+        if self.config.dummy:
+            data_dir = "dummy." + data_dir
+        num_shards = _DATA_TO_NUM_SHARDS[data_dir]
+        data_dir = os.path.join("data", self.config.wiki_split, data_dir)
+        files = [os.path.join(data_dir, f"train-{i:05d}-of-{num_shards:05d}.parquet") for i in range(num_shards)]
+        downloaded_files = dl_manager.download_and_extract(files)
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=downloaded_files),
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": downloaded_files}),
         ]
 
-    def _generate_examples(self, data_file, vectors_files=None):
-        vec_idx = 0
-        vecs = []
-        lines = open(data_file, "r", encoding="utf-8")
-        next(lines)  # skip headers
-        for i, line in enumerate(lines):
-            if self.config.dummy and i == 10000:
-                break
-            if i == 21015300:
-                break  # ignore the last 24 examples for which the embeddings are missing.
-            id, text, title = line.strip().split("\t")
-            text = text[1:-1]  # remove " symbol at the beginning and the end
-            text = text.replace('""', '"')  # replace double quotes by simple quotes
-            if self.config.with_embeddings:
-                if vec_idx >= len(vecs):
-                    if len(vectors_files) == 0:
-                        logger.warning(f"Ran out of vector files at index {i}")
-                        break
-                    vecs = np.load(open(vectors_files.pop(0), "rb"), allow_pickle=True)
-                    vec_idx = 0
-                vec_id, vec = vecs[vec_idx]
-                assert int(id) == int(vec_id), f"ID mismatch between lines {id} and vector {vec_id}"
-                yield id, {"id": id, "text": text, "title": title, "embeddings": vec}
-                vec_idx += 1
-            else:
-                yield id, {
-                    "id": id,
-                    "text": text,
-                    "title": title,
-                }
+    def _generate_tables(self, files):
+        for file in files:
+            with open(file, "rb") as f:
+                f = pq.ParquetFile(f)
+                for batch_idx, batch in enumerate(f.iter_batches(batch_size=1000)):
+                    yield batch_idx, pa.Table.from_batches([batch])
+                batch_idx += 1
 
     def _post_processing_resources(self, split):
         if self.config.with_index:
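Note: the new _generate_tables streams each shard through pyarrow instead of loading it whole. A self-contained sketch of the same read pattern, assuming any local parquet file at a placeholder path:

    import pyarrow as pa
    import pyarrow.parquet as pq

    path = "train-00000-of-00157.parquet"  # placeholder: any local parquet file
    with open(path, "rb") as f:
        parquet_file = pq.ParquetFile(f)
        # iter_batches yields RecordBatches of at most batch_size rows,
        # so a large shard never has to fit in memory all at once.
        for batch_idx, batch in enumerate(parquet_file.iter_batches(batch_size=1000)):
            table = pa.Table.from_batches([batch])
            print(batch_idx, table.num_rows)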
@@ -182,10 +166,17 @@
     def _download_post_processing_resources(self, split, resource_name, dl_manager):
         if resource_name == "embeddings_index":
             try:
-                downloaded_resources = dl_manager.download_and_extract(
-                    {"embeddings_index": self.config.index_file.format(split=split)}
-                )
-                return downloaded_resources["embeddings_index"]
+                if not self.config.dummy:
+                    index_file = os.path.join(
+                        "data",
+                        self.config.wiki_split,
+                        self.config.embeddings_name,
+                        os.path.basename(self.config.index_file.format(split=split).split(".", 2)[-1])
+                    )
+                    downloaded_resources = dl_manager.download_and_extract(
+                        {"embeddings_index": index_file}
+                    )
+                    return downloaded_resources["embeddings_index"]
             except (FileNotFoundError, ConnectionError):  # index doesn't exist
                 pass
 
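Note: nothing changes for callers; the script now downloads parquet shards instead of the TSV passages file and numpy vector files. A usage sketch (the config name follows the existing wiki_dpr naming scheme and is given here only as an example):

    from datasets import load_dataset

    # "no_index" skips the FAISS index download; streaming avoids
    # materializing all parquet shards locally before reading.
    ds = load_dataset("wiki_dpr", "psgs_w100.nq.no_index", split="train", streaming=True)
    print(next(iter(ds)))  # {'id': ..., 'text': ..., 'title': ..., 'embeddings': [...]}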