| | """(SC)^2QA: Self-Contained Summary-Centric QA Dataset. |
| | This dataset (https://huggingface.co/datasets/sc2qa/sc2q_commoncrawl_large) contains 529,039 question and article pairs. |
| | If you want {Question, Article, Summary, Length Constraint} 4-tuples, please load sc2qa_commoncrawl (https://huggingface.co/datasets/sc2qa/sc2qa_commoncrawl) |
| | """ |

import csv

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{zhou2021generating,
  author    = {Li Zhou and Kevin Small and Yong Zhang and Sandeep Atluri},
  title     = "{Generating Self-Contained and Summary-Centric Question Answer Pairs via Differentiable Reward Imitation Learning}",
  booktitle = {The 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP 2021)},
  year      = 2021,
}
"""

_DESCRIPTION = """\
(SC)^2QA: Self-Contained Summary-Centric QA Dataset of question and article pairs.
"""

_URLS = {
    "train": "https://huggingface.co/datasets/sc2qa/sc2q_commoncrawl_large/resolve/main/train.csv",
    "val": "https://huggingface.co/datasets/sc2qa/sc2q_commoncrawl_large/resolve/main/val.csv",
    "test": "https://huggingface.co/datasets/sc2qa/sc2q_commoncrawl_large/resolve/main/test.csv",
}


class SC2QAConfig(datasets.BuilderConfig):
    """BuilderConfig for (SC)^2QA."""

    def __init__(self, **kwargs):
        """BuilderConfig for (SC)^2QA.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class SC2QA(datasets.GeneratorBasedBuilder):
    """(SC)^2QA: Self-Contained Summary-Centric QA Dataset builder."""

    BUILDER_CONFIGS = [
        SC2QAConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "question": datasets.Value("string"),
                    "article": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the per-split CSV files listed in _URLS.
        downloaded_files = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yields examples in raw (text) form, one per CSV row."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="ascii", errors="ignore") as f:
            csv_reader = csv.DictReader(f)
            # Each row is a dict whose keys match the features declared in
            # _info() (question, article, url).
            for i, row in enumerate(csv_reader):
                yield i, row