| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | """PropSegmEnt: A Large-Scale Corpus for Proposition-Level Segmentation and Entailment Recognition.""" |
| |
|
| |
|
| | import csv |
| | import json |
| | import os |
| |
|
| | import datasets |
| |
|
# BibTeX entry for the paper introducing the corpus (ACL 2023 Findings).
_CITATION = """\
@inproceedings{chen2023propsegment,
title = "{PropSegmEnt}: A Large-Scale Corpus for Proposition-Level Segmentation and Entailment Recognition",
author = "Chen, Sihao and Buthpitiya, Senaka and Fabrikant, Alex and Roth, Dan and Schuster, Tal",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
year = "2023",
}
"""
| |
|
| | |
| | |
# Human-readable dataset card text. Fix: "orignial" -> "original" (typo in the
# published description string).
_DESCRIPTION = """\
This is a reproduced (i.e. after web-crawling) and processed version of the "PropSegment" dataset from Google Research.

Since the News portion of the dataset is released only via urls, we reconstruct the dataset by crawling. Overall, ~96%
of the dataset can be reproduced, and the rest ~4% either have url no longer valid, or sentences that have been edited
(i.e. cannot be aligned with the original dataset).

PropSegment (Proposition-level Segmentation and Entailment) is a large-scale, human annotated dataset for segmenting
English text into propositions, and recognizing proposition-level entailment relations --- whether a different, related
document entails each proposition, contradicts it, or neither.

The original dataset features >45k human annotated propositions, i.e. individual semantic units within sentences, as
well as >45k entailment labels between propositions and documents.
"""
| |
|
# Upstream project page and distribution license.
_HOMEPAGE = "https://github.com/google-research-datasets/PropSegmEnt"

_LICENSE = "CC-BY-4.0"

# Raw-file root of the reproduced-data repository; each config maps its
# train/dev/test splits to one JSONL file under that root.
_URL = "https://raw.githubusercontent.com/schen149/PropSegmEnt/main/"
_URLS = {
    config: {
        split: _URL + "{}.{}.jsonl".format(stem, split)
        for split in ("train", "dev", "test")
    }
    for config, stem in (
        ("segmentation", "proposition_segmentation"),
        ("nli", "propnli"),
    )
}

# Config name -> filename stem of its JSONL files in the repository.
_CONFIG_TO_FILENAME = {
    "segmentation": "proposition_segmentation",
    "nli": "propnli",
}
| |
|
class PropSegment(datasets.GeneratorBasedBuilder):
    """Builder for the reproduced PropSegmEnt corpus.

    Two configurations are exposed:
      * ``segmentation`` -- each example pairs a sentence with its annotated
        propositions.
      * ``nli`` -- each example is a (hypothesis, premise, label) triple for
        proposition-level entailment.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        # Fixed: the descriptions were leftover placeholders from the HF
        # loading-script template ("covers a first domain", etc.).
        datasets.BuilderConfig(
            name="segmentation",
            version=VERSION,
            description="Proposition segmentation: sentences paired with their annotated propositions.",
        ),
        datasets.BuilderConfig(
            name="nli",
            version=VERSION,
            description="Proposition-level NLI: hypothesis/premise pairs with an entailment label.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "segmentation"

    def _info(self):
        """Return dataset metadata; the feature schema depends on the config."""
        if self.config.name == "segmentation":
            features = datasets.Features(
                {
                    "sentence": datasets.Value("string"),
                    "propositions": datasets.Value("string"),
                }
            )
        else:  # "nli"
            features = datasets.Features(
                {
                    "hypothesis": datasets.Value("string"),
                    "premise": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-split JSONL files and declare train/dev/test splits.

        Bug fix: ``download_and_extract`` is given a dict of URLs and therefore
        returns a dict mapping the same keys ("train"/"dev"/"test") to local
        file paths. The previous code passed that dict to ``os.path.join`` as
        if it were a directory, which raises ``TypeError`` at runtime. We now
        index the returned dict per split instead.
        """
        downloaded = dl_manager.download_and_extract(_URLS[self.config.name])

        split_specs = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "dev"),
            (datasets.Split.TEST, "test"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": downloaded[tag],
                    "split": tag,
                },
            )
            for split_name, tag in split_specs
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs from one JSONL file.

        Args:
            filepath: Local path of the split's JSONL file (one JSON object
                per line).
            split: Split tag ("train"/"dev"/"test"); unused here but kept
                because ``_split_generators`` passes it via ``gen_kwargs``.
        """
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                if self.config.name == "segmentation":
                    yield key, {
                        "sentence": data["sentence"],
                        "propositions": data["propositions"],
                    }
                else:
                    yield key, {
                        "hypothesis": data["hypothesis"],
                        "premise": data["premise"],
                        "label": data["label"],
                    }