# books2.py — Hugging Face dataset loading script for matchbench/books2
# (repo: books2 / books2.py, author: cloverhxy, commit f580aa5 "Update books2.py")
import os
import datasets
import pandas as pd
class books2Config(datasets.BuilderConfig):
    """BuilderConfig that pairs a feature schema with the URL of its data."""

    def __init__(self, features, data_url, **kwargs):
        """Record the feature dict and data URL; forward everything else.

        Args:
            features: mapping of column name -> datasets.Value type.
            data_url: URL of the CSV file (or URL prefix for the "pairs"
                config, whose per-split file names are appended later).
            **kwargs: standard BuilderConfig arguments (name, version, ...).
        """
        super().__init__(**kwargs)
        self.data_url = data_url
        self.features = features
class books2(datasets.GeneratorBasedBuilder):
    """Dataset builder for the matchbench/books2 entity-matching dataset.

    Three configs are exposed:
      * "pairs"  -- labeled candidate pairs (ltable_id, rtable_id, label)
                    with train/valid/test splits, one CSV per split.
      * "source" -- the records of tableA.csv.
      * "target" -- the records of tableB.csv (same schema minus "ISBN").
    """

    BUILDER_CONFIGS = [
        books2Config(
            name="pairs",
            features={
                "ltable_id": datasets.Value("string"),
                "rtable_id": datasets.Value("string"),
                "label": datasets.Value("string"),
            },
            # URL *prefix*: "<split>.csv" is appended in _split_generators.
            data_url="https://huggingface.co/datasets/matchbench/books2/resolve/main/",
        ),
        books2Config(
            name="source",
            features={
                "id": datasets.Value("string"),
                "FirstAuthor": datasets.Value("string"),
                "SecondAuthor": datasets.Value("string"),
                "ThirdAuthor": datasets.Value("string"),
                "ISBN13": datasets.Value("string"),
                "PageCount": datasets.Value("string"),
                "PublishDate": datasets.Value("string"),
                "Publisher": datasets.Value("string"),
                "Title": datasets.Value("string"),
                "ISBN": datasets.Value("string"),
            },
            data_url="https://huggingface.co/datasets/matchbench/books2/resolve/main/tableA.csv",
        ),
        books2Config(
            name="target",
            features={
                "id": datasets.Value("string"),
                "FirstAuthor": datasets.Value("string"),
                "SecondAuthor": datasets.Value("string"),
                "ThirdAuthor": datasets.Value("string"),
                "ISBN13": datasets.Value("string"),
                "PageCount": datasets.Value("string"),
                "PublishDate": datasets.Value("string"),
                "Publisher": datasets.Value("string"),
                "Title": datasets.Value("string"),
            },
            data_url="https://huggingface.co/datasets/matchbench/books2/resolve/main/tableB.csv",
        ),
    ]

    def _info(self):
        """Return DatasetInfo exposing the active config's feature schema."""
        return datasets.DatasetInfo(
            features=datasets.Features(self.config.features)
        )

    def _split_generators(self, dl_manager):
        """Download the CSV file(s) for the active config and declare splits.

        Fix: the original built the per-split URL with os.path.join, which
        inserts backslash separators on Windows and corrupts the URL; plain
        string concatenation is correct for URLs on every platform (the
        "pairs" data_url already ends with "/").
        """
        if self.config.name == "pairs":
            return [
                datasets.SplitGenerator(
                    name=split,
                    gen_kwargs={
                        "path_file": dl_manager.download_and_extract(
                            self.config.data_url + f"{split}.csv"
                        ),
                        "split": split,
                    },
                )
                for split in ["train", "valid", "test"]
            ]
        # "source" / "target": data_url already points at the single CSV.
        return [
            datasets.SplitGenerator(
                name=self.config.name,
                gen_kwargs={
                    "path_file": dl_manager.download_and_extract(self.config.data_url),
                    "split": self.config.name,
                },
            )
        ]

    def _generate_examples(self, path_file, split):
        """Yield (row_index, example) pairs from the downloaded CSV.

        The columns to emit are exactly the keys of the active config's
        feature schema, so a single loop replaces the original's three
        hand-duplicated per-split yield dictionaries while producing
        identical examples.

        Args:
            path_file: local path of the downloaded CSV.
            split: split name (unused now that the schema drives the keys;
                kept because _split_generators passes it).
        """
        frame = pd.read_csv(path_file)
        columns = list(self.config.features)
        for i, row in frame.iterrows():
            yield i, {col: row[col] for col in columns}