import datasets
import more_itertools
from datasets import Features, Value, interleave_datasets, load_dataset

|
def to_dict_element(el, cols):
    """Un-flatten one example: dotted column names (e.g. "meta.id") are
    regrouped under their parent key as a list of child dicts, while
    plain columns are copied through unchanged."""
    bucketed_fields = more_itertools.bucket(cols, key=lambda x: x.split(".")[0])
    final_dict = {}
    for parent_name in set(x.split(".")[0] for x in cols):
        fields = [y.split(".")[-1] for y in list(bucketed_fields[parent_name])]
        if len(fields) == 1 and fields[0] == parent_name:
            # No dotted children: this is an ordinary top-level column.
            final_dict[parent_name] = el[fields[0]]
        else:
            # Zip the parallel child columns back into a list of dicts.
            parent_list = []
            zipped_fields = list(zip(*[el[f"{parent_name}.{child}"] for child in fields]))
            for x in zipped_fields:
                parent_list.append({k: v for k, v in zip(fields, x)})
            final_dict[parent_name] = parent_list
    return final_dict
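
# Illustrative round-trip on hypothetical data (not taken from the datasets
# loaded below):
#   to_dict_element(
#       {"source": "q?", "meta.id": ["1"], "meta.text": ["passage"]},
#       ["source", "meta.id", "meta.text"],
#   )
#   -> {"source": "q?", "meta": [{"id": "1", "text": "passage"}]}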
|
|
|
|
| logger = datasets.logging.get_logger(__name__) |
|
|
|
|
| _CITATION = """ """ |
|
|
| _DESCRIPTION = """ """ |
| base_features = {"source": Value(dtype="string"), |
| "meta":{ |
| "id":Value(dtype="string"), |
| "qid":Value(dtype="string"), |
| "question":Value(dtype="string"), |
| "title":Value(dtype="string"), |
| "text":Value(dtype="string"), |
| } |
| } |
|
|
|
|
def get_config_splits(path):
    """Return a mapping from each config of a hub dataset to its available splits."""
    return {config: datasets.get_dataset_split_names(path, config)
            for config in datasets.get_dataset_config_names(path)}
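
# For example (shape only; the real configs and splits are whatever the hub
# datasets below actually expose):
#   get_config_splits("iohadrubin/mapped_nq")
#   -> {"reranking_bm25": ["train", "validation", "test"], ...}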
|
|
|
|
# The reranking format carries a gold "target" string; the inference format
# is the same schema without it.
reranking_mapped_features = Features({**base_features, "target": Value(dtype="string")})

inference_mapped_features = Features(base_features)
|
|
|
|
|
|
class MappedMultitaskConfig(datasets.BuilderConfig):
    """BuilderConfig for MappedMultitask.

    Args:
        features: the `Features` schema for this config.
        retriever: which retrieval results to use (e.g. "bm25" or "dprnq").
        feature_format: "reranking" (with a target) or "inference" (without).
    """

    def __init__(self, features=None, retriever=None, feature_format=None, **kwargs):
        super(MappedMultitaskConfig, self).__init__(**kwargs)
        self.features = features
        self.retriever = retriever
        self.feature_format = feature_format
|
|
|
|
class MappedMultitask(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIGS = [
        MappedMultitaskConfig(
            name="reranking_bm25",
            version=datasets.Version("1.0.1", ""),
            description="MappedMultitask dataset in DPR format with the bm25 retrieval results",
            features=reranking_mapped_features,
            retriever="bm25",
            feature_format="reranking",
        ),
        MappedMultitaskConfig(
            name="reranking_dprnq",
            version=datasets.Version("1.0.1", ""),
            description="MappedMultitask dataset in DPR format with the dprnq retrieval results",
            features=reranking_mapped_features,
            retriever="dprnq",
            feature_format="reranking",
        ),
    ]
|
|
    def _info(self):
        self.features = self.config.features
        self.retriever = self.config.retriever
        self.feature_format = self.config.feature_format
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.features,
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )
|
|
    def _split_generators(self, dl_manager):
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"split": "test"},
            ),
        ]
|
|
    def _prepare_split(self, split_generator, **kwargs):
        # Re-assert the config's nested features so the written dataset keeps
        # this schema rather than the flattened one used during generation.
        self.info.features = self.config.features
        super()._prepare_split(split_generator, **kwargs)
|
|
    def _generate_examples(self, split):
        """This function returns the examples in the raw (text) form."""
        dataset_list = []
        qampari = load_dataset("iohadrubin/mapped_qampari", self.info.config_name)
        if split in get_config_splits("iohadrubin/mapped_qampari")[self.info.config_name] and split in qampari:
            dataset_list.append(qampari[split].flatten())

        nq = load_dataset("iohadrubin/mapped_nq", self.info.config_name)
        if split in get_config_splits("iohadrubin/mapped_nq")[self.info.config_name] and split in nq:
            dataset_list.append(nq[split].flatten())

        if not dataset_list:
            # Neither source dataset provides this split for the current config.
            return

        flattened_dataset = interleave_datasets(datasets=dataset_list).flatten()

        for i, element in enumerate(flattened_dataset):
            # Rebuild the nested "meta" struct from the flattened "meta.*" columns.
            new_element = dict(source=element["source"], target=element["target"])
            new_element["meta"] = dict(
                id=element["meta.id"],
                qid=element["meta.qid"],
                question=element["meta.question"],
                title=element["meta.title"],
                text=element["meta.text"],
            )
            yield i, new_element
|
|
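# Usage sketch, assuming this script is saved locally as "mapped_multitask.py"
# (the filename is illustrative) and that the "iohadrubin/mapped_qampari" and
# "iohadrubin/mapped_nq" hub datasets are reachable:
#
#   from datasets import load_dataset
#   ds = load_dataset("mapped_multitask.py", "reranking_bm25")
#   print(ds["train"][0]["source"])
#   print(ds["train"][0]["meta"]["question"])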