import pickle

import more_itertools

import datasets
from datasets import Features, Value, load_dataset
from datasets.fingerprint import Hasher

def to_dict_element(el, cols):
    """Re-nest a flattened example: columns named "parent.child" are zipped
    back into a list of {child: value} dicts stored under "parent"."""
    bucketed_fields = more_itertools.bucket(cols, key=lambda x: x.split(".")[0])
    final_dict = {}
    for parent_name in set(x.split(".")[0] for x in cols):
        fields = [y.split(".")[-1] for y in list(bucketed_fields[parent_name])]
        if len(fields) == 1 and fields[0] == parent_name:
            # Plain top-level column: copy it through unchanged.
            final_dict[parent_name] = el[fields[0]]
        else:
            # Zip the parallel child columns back into one dict per item.
            parent_list = []
            zipped_fields = list(zip(*[el[f"{parent_name}.{child}"] for child in fields]))
            for x in zipped_fields:
                parent_list.append({k: v for k, v in zip(fields, x)})
            final_dict[parent_name] = parent_list
    return final_dict
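

# A minimal sketch of what to_dict_element recovers, assuming the dotted
# column names produced by Dataset.flatten() (toy values, not real data):
#
#   row = {"qid": "q1", "ctxs.title": ["t1", "t2"], "ctxs.text": ["a", "b"]}
#   to_dict_element(row, cols=list(row))
#   # -> {"qid": "q1",
#   #     "ctxs": [{"title": "t1", "text": "a"}, {"title": "t2", "text": "b"}]}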


def mega_hash(func, dataset_name, dataset_config, dataset_obj, split):
    """Build a deterministic fingerprint from the mapping function and the
    dataset identity, for use as Dataset.map(new_fingerprint=...)."""
    hasher = Hasher()
    hasher.update(repr(dataset_obj))
    hasher.update(pickle.dumps(func))
    hasher.update(split)
    hasher.update(dataset_config)
    hasher.update(dataset_name)
    return hasher.hexdigest()
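

# Illustrative use (dataset and config names are placeholders): identical
# inputs always yield the same fingerprint, so Dataset.map can reuse its
# cache across runs.
#
#   fp = mega_hash(to_source_target, "nq", "reranking_bm25", ds, "train")
#   ds.map(to_source_target, batched=True, new_fingerprint=fp)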


logger = datasets.logging.get_logger(__name__)

_CITATION = """ """

_DESCRIPTION = """ """


def get_config_splits(path):
    """Map each config name of the dataset script at `path` to its split names."""
    return {
        config: datasets.get_dataset_split_names(path, config)
        for config in datasets.get_dataset_config_names(path)
    }
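

# Illustrative output for the configs defined below (the actual split lists
# depend on the underlying data files):
#
#   get_config_splits("nq.py")
#   # -> {"reranking_dprnq": ["train", "validation"],
#   #     "reranking_bm25": ["train", "validation"],
#   #     "inference_dprnq": ["train", "validation", "test"]}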


# Features shared by both output formats; "meta" is a nested struct of strings.
base_features = {
    "source": Value(dtype="string"),
    "meta": {
        "id": Value(dtype="string"),
        "qid": Value(dtype="string"),
        "question": Value(dtype="string"),
        "title": Value(dtype="string"),
        "text": Value(dtype="string"),
    },
}
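
# Features accepts plain nested dicts, so a conforming record looks like
# (values made up for illustration):
#
#   {"source": "Title: ... Text: ... Question: ...",
#    "meta": {"id": "p17", "qid": "q3", "question": "...",
#             "title": "...", "text": "..."}}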


reranking_mapped_features = Features({**base_features, "target": Value(dtype="string")})

inference_mapped_features = Features(base_features)


class MappedNQConfig(datasets.BuilderConfig):
    """BuilderConfig for MappedNQ."""

    def __init__(self, features=None, retriever=None, feature_format=None, **kwargs):
        super().__init__(**kwargs)
        self.features = features
        self.retriever = retriever
        self.feature_format = feature_format


def to_source_target(example):
    """Batched map function: turn flattened DPR-style examples into
    (source, target, meta) reranking triples. Positive contexts are paired
    with the first gold answer; hard negatives get the fixed target
    "Not relevant"."""
    source = []
    target = []
    meta_list = []
    for ctxs, titles, question, qid, cids, answer_list in zip(
        example["positive_ctxs.text"],
        example["positive_ctxs.title"],
        example["question"],
        example["qid"],
        example["positive_ctxs.passage_id"],
        example["answers"],
    ):
        # question and qid are per-example scalars; zip only the per-context
        # lists, otherwise the strings would be iterated character by character.
        for c, t, cid in zip(ctxs, titles, cids):
            source.append(f"Title: {t} Text: {c} Question: {question} ")
            target.append(f"Answer: {answer_list[0]}")
            meta_list.append({"id": cid, "qid": qid, "question": question, "title": t, "text": c})
    for ctxs, titles, question, qid, cids in zip(
        example["hard_negative_ctxs.text"],
        example["hard_negative_ctxs.title"],
        example["question"],
        example["qid"],
        example["hard_negative_ctxs.passage_id"],
    ):
        for c, t, cid in zip(ctxs, titles, cids):
            source.append(f"Title: {t} Text: {c} Question: {question}")
            target.append("Not relevant")
            meta_list.append({"id": cid, "qid": qid, "question": question, "title": t, "text": c})
    return {"target": target, "source": source, "meta": meta_list}
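

# Sketch of one positive context flowing through to_source_target
# (toy strings; in batched mode each field above is a list of these):
#
#   source: "Title: Paris Text: Paris is the capital of... Question: What is the capital of France? "
#   target: "Answer: Paris"
#
# Hard negatives share the source format but always get target "Not relevant".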


def transform_dpr(dataset, dataset_name, dataset_config):
    """Apply to_source_target to every split of a DPR-style DatasetDict,
    using a deterministic fingerprint so the map cache survives reruns."""
    for split in dataset:
        _split_ds = dataset[split].flatten()
        fingerprint = mega_hash(to_source_target, dataset_name, dataset_config, _split_ds, split)
        dataset[split] = _split_ds.map(
            to_source_target,
            batched=True,
            remove_columns=_split_ds.column_names,
            new_fingerprint=fingerprint,
        )
    return dataset
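

# Illustrative use (path and config are placeholders for a DPR-style script):
#
#   ds = load_dataset("path/to/dpr_script.py", "some_config")
#   ds = transform_dpr(ds, "nq", "some_config")
#   # every split now carries only the "source"/"target"/"meta" columns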


class MappedNQ(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        MappedNQConfig(
            name="reranking_dprnq",
            version=datasets.Version("1.0.1", ""),
            description="MappedNQ dataset in reranking format with the dprnq retrieval results",
            features=reranking_mapped_features,
            retriever="dprnq",
            feature_format="reranking",
        ),
        MappedNQConfig(
            name="reranking_bm25",
            version=datasets.Version("1.0.1", ""),
            description="MappedNQ dataset in reranking format with the bm25 retrieval results",
            features=reranking_mapped_features,
            retriever="bm25",
            feature_format="reranking",
        ),
        MappedNQConfig(
            name="inference_dprnq",
            version=datasets.Version("1.0.1", ""),
            description="MappedNQ dataset in inference format with the dprnq retrieval results",
            features=inference_mapped_features,
            retriever="dprnq",
            feature_format="inference",
        ),
    ]

    def _info(self):
        self.features = self.config.features
        self.retriever = self.config.retriever
        self.feature_format = self.config.feature_format
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.features,
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"split": "test"},
            ),
        ]

    def _prepare_split(self, split_generator, **kwargs):
        # The features depend on the chosen config, so pin them on the
        # DatasetInfo before the split is written.
        self.info.features = self.config.features
        super()._prepare_split(split_generator, **kwargs)

    def _generate_examples(self, split):
        """Yields the examples in raw (text) form."""
        # NOTE: hardcoded path to the underlying dataset script.
        path = "/home/ohadr/ssd/dalle-mini/qampari/nq.py"
        flattened_dataset = load_dataset(path, self.info.config_name).flatten()
        if split not in get_config_splits(path)[self.info.config_name] or split not in flattened_dataset:
            return
        flattened_dataset = flattened_dataset[split]
        if self.feature_format == "reranking":
            # Deterministic fingerprint lets datasets reuse the mapped cache.
            fingerprint = mega_hash(to_source_target, "nq", self.info.config_name, flattened_dataset, split)
            transformed_dataset = flattened_dataset.map(
                to_source_target,
                batched=True,
                remove_columns=flattened_dataset.column_names,
                new_fingerprint=fingerprint,
            )
            for i, element in enumerate(transformed_dataset):
                yield i, element
        elif self.feature_format == "inference":
            for element in flattened_dataset:
                element = to_dict_element(element, cols=flattened_dataset.column_names)
                qid, question = element["qid"], element["question"]
                for ctx in element["ctxs"]:
                    # Retrieval scores are not part of the features; drop them.
                    ctx.pop("score", None)
                    source_element = {
                        "source": f"Title: {ctx['title']}\nText: {ctx['text']}\nQuestion: {question}\n",
                        "meta": {**ctx, "qid": qid, "question": question},
                    }
                    yield f"{qid}__{ctx['id']}", source_element
        else:
            raise ValueError(f"Unknown feature_format: {self.feature_format}")
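

# Illustrative loading of this script (the save path is up to the caller):
#
#   ds = load_dataset("path/to/this_script.py", "reranking_bm25")
#   print(ds["train"][0]["source"])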