| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | """A Dataset loading script for the QA-Discourse dataset (Pyatkin et. al., ACL 2020).""" |
| |
|
| |
|
| | import datasets |
| | from pathlib import Path |
| | from typing import List |
| | import pandas as pd |
| |
|
| |
|
# BibTeX entry for the QADiscourse paper (Pyatkin et al., EMNLP 2020),
# surfaced on the dataset card via DatasetInfo(citation=...).
_CITATION = """\
@inproceedings{pyatkin2020qadiscourse,
    title={QADiscourse-Discourse Relations as QA Pairs: Representation, Crowdsourcing and Baselines},
    author={Pyatkin, Valentina and Klein, Ayal and Tsarfaty, Reut and Dagan, Ido},
    booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
    pages={2804--2819},
    year={2020}
}"""
| |
|
| |
|
| | _DESCRIPTION = """\ |
| | The dataset contains question-answer pairs to model discourse relations. |
| | While answers roughly correspond to spans of the sentence, these spans could have been freely adjusted by annotators to grammaticaly fit the question; |
| | Therefore, answers are given just as text and not as identified spans of the original sentence. |
| | See the paper for details: QADiscourse - Discourse Relations as QA Pairs: Representation, Crowdsourcing and Baselines, Pyatkin et. al., 2020 |
| | """ |
| |
|
# Project page hosting the dataset files and original annotation code.
_HOMEPAGE = "https://github.com/ValentinaPy/QADiscourse"

# License text surfaced on the dataset card via DatasetInfo(license=...).
_LICENSE = """Resources on this page are licensed CC-BY 4.0, a Creative Commons license requiring Attribution (https://creativecommons.org/licenses/by/4.0/)."""
| |
|
| |
|
# Raw TSV download URLs, keyed as "<domain>.<split>"; each train/dev/test split
# combines the wikinews and wikipedia portions (see _split_generators below).
_URLs = {
    "wikinews.train": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_train.tsv",
    "wikinews.dev": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_dev.tsv",
    "wikinews.test": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikinews_test.tsv",
    "wikipedia.train": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_train.tsv",
    "wikipedia.dev": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_dev.tsv",
    "wikipedia.test": "https://github.com/ValentinaPy/QADiscourse/raw/master/Dataset/wikipedia_test.tsv",
}
| |
|
# Column layout of the source TSV files, kept for reference.
# NOTE(review): not referenced anywhere in the visible code — presumably
# documentation of the TSV schema; confirm before removing.
COLUMNS = ['qasrl_id', 'sentence', 'worker_id', 'full_question', 'full_answer',
           'question_start', 'question_aux', 'question_body', 'answer',
           'untokenized sentence', 'target indices for untok sent']
| |
|
| |
|
| | |
class QaDiscourse(datasets.GeneratorBasedBuilder):
    """QA-Discourse: Discourse Relations as Question-Answer Pairs."""

    VERSION = datasets.Version("1.0.2")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="plain_text", version=VERSION, description="This provides the QA-Discourse dataset"
        ),
    ]

    DEFAULT_CONFIG_NAME = "plain_text"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata (features, description, license, citation)."""
        features = datasets.Features(
            {
                "sentence": datasets.Value("string"),
                "sent_id": datasets.Value("string"),
                # A question is kept as its components (prefix, auxiliary,
                # body, trailing "?"), hence a Sequence rather than one string.
                "question": datasets.Sequence(datasets.Value("string")),
                "answers": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download every TSV section and assemble the three splits.

        Each split (train / validation / test) concatenates the wikinews and
        wikipedia portions of the corpus.
        """
        corpora = {
            section: Path(dl_manager.download_and_extract(url))
            for section, url in _URLs.items()
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": [corpora["wikinews.train"],
                                  corpora["wikipedia.train"]],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepaths": [corpora["wikinews.dev"],
                                  corpora["wikipedia.dev"]],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepaths": [corpora["wikinews.test"],
                                  corpora["wikipedia.test"]],
                },
            ),
        ]

    def _generate_examples(self, filepaths: List[str]):
        """
        Yields QA-Discourse examples from a tsv file.
        Sentences with no QAs will yield an ``empty QA'' record, where both 'question' and 'answers' are empty lists.
        """
        df = pd.concat(
            [pd.read_csv(fn, sep='\t', on_bad_lines='skip') for fn in filepaths]
        ).reset_index(drop=True)
        # Coerce every cell to str in one vectorized pass. Equivalent to the
        # element-wise DataFrame.map(str) (NaN -> "nan" either way) but
        # astype(str) also works on pandas < 2.1, where DataFrame.map does
        # not exist.
        df = df.astype(str)
        for counter, row in df.iterrows():
            # Reassemble the question from its parts; the body may or may not
            # already end with '?', so strip any and append one explicitly.
            question = [row.question_start, row.question_aux, row.question_body.rstrip('?'), '?']
            answer = [row.answer]
            if row.question_start == "_":
                # "_" marks a sentence that has no QA pairs: emit an empty record.
                question = []
                answer = []

            yield counter, {
                "sentence": row.sentence,
                "sent_id": row.qasrl_id,
                "question": question,
                "answers": answer,
            }
| |
|