# baselines-v2 / cde_benchmark / formatters / data_formatter.py
# Uploaded via huggingface_hub (commit 545c4d5, verified)
from typing import List, Tuple
from datasets import DatasetDict, Dataset, load_dataset
class BaseDataFormatter:
    """Abstract interface for exposing a retrieval dataset as plain lists.

    Subclasses must implement all three accessors. Each one returns a pair
    ``(texts, ids)`` whose two sequences are aligned element-wise.
    """

    def get_nested(self) -> Tuple[List[List[str]], List[List[str]]]:
        """Return chunk texts and chunk ids grouped per parent document."""
        raise NotImplementedError

    def get_flattened(self) -> Tuple[List[str], List[str]]:
        """Return all chunk texts and their ids as flat lists."""
        raise NotImplementedError

    def get_queries(self) -> Tuple[List[str], List[str]]:
        """Return query texts and the id of each query's relevant chunk/doc."""
        raise NotImplementedError
class DataFormatter(BaseDataFormatter):
    """Formatter for datasets exposing ``queries`` and ``documents`` configs.

    Document rows are chunks whose ``chunk_id`` is a composite
    ``"<doc_id>_<internal_id>"``; :meth:`parse_id` splits that back out so
    chunks can be regrouped by parent document.
    """

    def __init__(self, dataset_path, split, query_key="queries", doc_key="documents"):
        self.doc_dataset = None
        self.queries_dataset = None
        self._load_from_path(dataset_path, split, query_key, doc_key)
        # Derive doc_id / internal_id columns from each chunk's composite id.
        self.doc_dataset = self.doc_dataset.map(self.parse_id)

    def _load_from_path(self, path, split, query_key, doc_key):
        """Load the document and query configurations for *split*."""
        self.doc_dataset = load_dataset(path, doc_key, split=split)
        self.queries_dataset = load_dataset(path, query_key, split=split)

    @staticmethod
    def parse_id(sample):
        """Split a composite chunk id into its document id and chunk index.

        Uses ``rsplit`` with ``maxsplit=1`` so document ids that themselves
        contain underscores (e.g. ``"my_doc_0"``) parse correctly: only the
        final underscore separates the internal chunk index.
        """
        doc_id, internal_id = sample["chunk_id"].rsplit("_", 1)
        return {"doc_id": doc_id, "internal_id": int(internal_id)}

    def get_nested(self) -> Tuple[List[List[str]], List[List[str]]]:
        """Return chunk texts and chunk ids grouped per document."""
        # TODO: verify it's sorted
        # Convert to pandas and group once instead of twice.
        grouped = self.doc_dataset.to_pandas().groupby("doc_id")
        return (
            list(grouped["chunk"].apply(list)),
            list(grouped["chunk_id"].apply(list)),
        )

    def get_flattened(self) -> Tuple[List[str], List[str]]:
        """Return all chunk texts and their ids as flat lists."""
        return self.doc_dataset["chunk"], self.doc_dataset["chunk_id"]

    def get_queries(self) -> Tuple[List[str], List[str]]:
        """Return query texts and the chunk id each query is relevant to."""
        return self.queries_dataset["query"], self.queries_dataset["chunk_id"]
class BEIRDataFormatter(BaseDataFormatter):
    """Formatter for BEIR-style datasets (``corpus``/``queries``/``qrels``).

    BEIR corpora are flat, so :meth:`get_nested` groups passages into
    synthetic "documents" of ``concat_num_docs`` shuffled passages each.
    """

    def __init__(
        self,
        dataset_path,
        split,
        query_key="queries",
        doc_key="corpus",
        concat_num_docs=2,
    ):
        self.doc_dataset = None
        self.queries_dataset = None
        self.mapping = None
        self._load_from_path(dataset_path, split, query_key, doc_key)
        self.concat_num_docs = concat_num_docs

    def _load_from_path(self, path, split, query_key, doc_key):
        """Load corpus, queries, and the qrels query→document mapping."""
        self.doc_dataset = load_dataset(path, doc_key, split=split)
        self.queries_dataset = load_dataset(path, query_key, split=split)
        # mapping dataset is used to map queries to relevant documents.
        # NOTE(review): if a query has several relevant documents in qrels,
        # only the last row survives this dict build — confirm intended.
        mapping_dataset = load_dataset(path, "qrels", split=split)
        self.mapping = {
            query["query-id"]: query["corpus-id"] for query in mapping_dataset
        }

    def get_nested(self) -> Tuple[List[List[str]], List[List[str]]]:
        """Return passages and their ids grouped into synthetic documents.

        Passages are shuffled deterministically (seed 42), then every run of
        ``concat_num_docs`` consecutive passages shares one ``doc_id``.
        """
        # Guard so a second call does not re-shuffle or crash trying to add
        # an already-existing "doc_id" column.
        if "doc_id" not in self.doc_dataset.column_names:
            self.doc_dataset = self.doc_dataset.shuffle(seed=42)
            # Position j belongs to synthetic document j // concat_num_docs.
            doc_ids = [j // self.concat_num_docs for j in range(len(self.doc_dataset))]
            self.doc_dataset = self.doc_dataset.add_column("doc_id", doc_ids)
        # Convert to pandas and group once instead of twice.
        grouped = self.doc_dataset.to_pandas().groupby("doc_id")
        return (
            list(grouped["text"].apply(list)),
            list(grouped["_id"].apply(list)),
        )

    def get_flattened(self) -> Tuple[List[str], List[str]]:
        """Return all passage texts and their corpus ids as flat lists."""
        return self.doc_dataset["text"], self.doc_dataset["_id"]

    def get_queries(self) -> Tuple[List[str], List[str]]:
        """Return query texts and the gold corpus id for each query."""
        gold_docs = [self.mapping[query["_id"]] for query in self.queries_dataset]
        return self.queries_dataset["text"], gold_docs