import glob
import json
import os
from io import BytesIO
import more_itertools
import pandas as pd
import datasets
from datasets import Dataset, DatasetDict, DatasetInfo, Features, Sequence, Value,load_dataset
from datasets.fingerprint import Hasher
import pickle
from datasets import ClassLabel, Dataset, DatasetDict, interleave_datasets, load_dataset,get_dataset_split_names
def to_dict_element(el, cols):
    """Re-nest a flattened example.

    ``el`` maps flattened column names (e.g. ``"meta.id"``) to values and
    ``cols`` lists the columns to process.  Undotted columns are copied
    through unchanged; dotted columns are grouped under their parent prefix
    and their parallel value lists are zipped back into a list of child
    dicts.

    Args:
        el: mapping of flattened column name -> value.  For dotted columns
            the values are assumed to be parallel, equal-length sequences.
        cols: iterable of column names to re-nest.

    Returns:
        dict with one entry per parent prefix: either the scalar value
        (undotted column) or a list of ``{child_field: value}`` dicts.
    """
    # Group child field names under their parent prefix, preserving the
    # order in which columns appear.  This is a stdlib replacement for the
    # original more_itertools.bucket call — no third-party dependency needed.
    children_by_parent = {}
    for col in cols:
        parent = col.split(".")[0]
        children_by_parent.setdefault(parent, []).append(col.split(".")[-1])

    final_dict = {}
    for parent, fields in children_by_parent.items():
        if len(fields) == 1 and fields[0] == parent:
            # Undotted column such as "source": copy the value through.
            final_dict[parent] = el[parent]
        else:
            # Dotted columns: zip the parallel child sequences back into a
            # list of per-item dicts, e.g. meta.id + meta.text -> meta.
            rows = zip(*[el[f"{parent}.{child}"] for child in fields])
            final_dict[parent] = [dict(zip(fields, row)) for row in rows]
    return final_dict
# Module-level logger, following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)
# NOTE(review): citation and description are placeholders — fill in before release.
_CITATION = """ """
_DESCRIPTION = """ """
# Columns shared by every config: the model input text ("source") plus
# per-example metadata.  "meta" is declared nested here; the source datasets
# store it flattened as dotted columns ("meta.id", ...) which
# _generate_examples re-nests before yielding.
base_features = {"source": Value(dtype="string"),
    "meta": {
        "id": Value(dtype="string"),
        "qid": Value(dtype="string"),
        "question": Value(dtype="string"),
        "title": Value(dtype="string"),
        "text": Value(dtype="string"),
    }
}
def get_config_splits(path):
    """Map each config name of the dataset at ``path`` to its split names.

    Queries the HuggingFace Hub via the ``datasets`` library; returns a
    dict of ``{config_name: [split_name, ...]}``.
    """
    splits_by_config = {}
    for config_name in datasets.get_dataset_config_names(path):
        splits_by_config[config_name] = datasets.get_dataset_split_names(path, config_name)
    return splits_by_config
# Reranking configs add the gold "target" string on top of the shared base
# features; inference configs expose only the base features (no target).
reranking_mapped_features = Features({**base_features,"target": Value(dtype="string"),})
inference_mapped_features = Features(base_features)
class MappedMultitaskConfig(datasets.BuilderConfig):
    """BuilderConfig for MappedMultitaskDPR.

    Extra attributes on top of ``datasets.BuilderConfig``:
        features: the ``Features`` schema this config yields.
        retriever: name of the retriever whose results back the examples.
        feature_format: layout of the examples (e.g. "reranking").
    """

    def __init__(self, features=None, retriever=None, feature_format=None, **kwargs):
        # Standard builder-config kwargs (name, version, description, ...)
        # are forwarded to the base class untouched.
        super().__init__(**kwargs)
        self.features = features
        self.retriever = retriever
        self.feature_format = feature_format
class MappedMultitask(datasets.GeneratorBasedBuilder):
    """Multitask reranking dataset built by interleaving the mapped QAMPARI
    and NQ datasets from the HuggingFace Hub.

    Each config selects which retriever's results back the examples (bm25
    or dprnq); both use the "reranking" feature format, i.e. source/target
    text pairs plus nested per-example metadata.
    """

    BUILDER_CONFIGS = [
        MappedMultitaskConfig(
            name="reranking_bm25",
            version=datasets.Version("1.0.1", ""),
            description="MappedMultitask dataset in DPR format with the bm25 retrieval results",
            features=reranking_mapped_features,
            retriever="bm25",
            feature_format="reranking",
        ),
        MappedMultitaskConfig(
            name="reranking_dprnq",
            version=datasets.Version("1.0.1", ""),
            # BUG FIX: description previously said "bm25" (copy-paste error);
            # this config uses the DPR-NQ retrieval results.
            description="MappedMultitask dataset in DPR format with the dprnq retrieval results",
            features=reranking_mapped_features,
            retriever="dprnq",
            feature_format="reranking",
        ),
    ]

    def _info(self):
        """Return the ``DatasetInfo`` for the selected config."""
        # Mirror config values onto the builder for convenient access elsewhere.
        self.features = self.config.features
        self.retriever = self.config.retriever
        self.feature_format = self.config.feature_format
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.features,
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Declare train/validation/test splits.

        Nothing is downloaded here; the source datasets are fetched lazily
        inside ``_generate_examples``.
        """
        return [
            datasets.SplitGenerator(name=split, gen_kwargs={"split": split_name})
            for split, split_name in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _prepare_split(self, split_generator, **kwargs):
        # Re-assert the config's nested schema before writing: the
        # flatten() calls in _generate_examples would otherwise leave a
        # flattened feature layout on self.info.
        self.info.features = self.config.features
        super()._prepare_split(split_generator, **kwargs)

    def _generate_examples(self, split):
        """Yield ``(key, example)`` pairs for ``split`` in raw (text) form.

        Loads the mapped QAMPARI and NQ datasets for this config name,
        interleaves whichever of them provide the requested split, and
        re-nests the flattened "meta.*" columns into a "meta" dict.
        """
        dataset_list = []
        qampari = load_dataset("iohadrubin/mapped_qampari", self.info.config_name)
        if split in get_config_splits("iohadrubin/mapped_qampari")[self.info.config_name] and split in qampari:
            dataset_list.append(qampari[split].flatten())
        nq = load_dataset("iohadrubin/mapped_nq", self.info.config_name)
        if split in get_config_splits("iohadrubin/mapped_nq")[self.info.config_name] and split in nq:
            dataset_list.append(nq[split].flatten())
        if not dataset_list:
            # Neither source dataset provides this split: yield nothing
            # instead of letting interleave_datasets fail on an empty list.
            return
        flattened_dataset = interleave_datasets(datasets=dataset_list).flatten()
        for i, element in enumerate(flattened_dataset):
            new_element = dict(source=element['source'], target=element['target'])
            new_element['meta'] = dict(
                id=element['meta.id'],
                qid=element['meta.qid'],
                question=element['meta.question'],
                title=element['meta.title'],
                text=element['meta.text'],
            )
            yield i, new_element
|