"""TODO: Add a description here.""" |

import itertools
import json

import datasets

_CITATION = """\
@inproceedings{perez2021models,
    title={Models and Datasets for Cross-Lingual Summarisation},
    author={Perez-Beltrachini, Laura and Lapata, Mirella},
    booktitle={Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing},
    pages={9408--9423},
    year={2021}
}
"""

_DESCRIPTION = """\
The XWikis Corpus (Perez-Beltrachini and Lapata, 2021) provides datasets with different language
pairs and directions for cross-lingual abstractive document summarisation. This version includes
five languages: English, German, French, Czech, and Chinese. The dataset is derived from Wikipedia.
It is based on the observation that, for a Wikipedia title, the lead section provides an overview
conveying salient information, while the body provides detailed information; the body and the lead
section are therefore taken as a document-summary pair. Furthermore, as a Wikipedia title can be
associated with Wikipedia articles in various languages, 1) Wikipedia's Interlanguage Links are
used to find titles across languages, and 2) given any two related Wikipedia titles, e.g., Huile
d'Olive (French) and Olive Oil (English), the lead section from one title is paired with the body
of the other to derive cross-lingual pairs.
"""

_HOMEPAGE = "https://datashare.ed.ac.uk/handle/10283/4188"

_LICENSE = "CC BY-SA 4.0"

VERSION = datasets.Version("0.1.0")

# Languages covered by the corpus (ISO 639-1 codes).
LANGS = ["en", "fr", "cs", "de", "zh"]

# All ordered language pairs, e.g. "en-fr", "fr-en", "de-cs", ...
LPAIRS = [f"{x}-{y}" for x, y in itertools.product(LANGS, LANGS) if x != y]

# Relative paths to the per-split data files. Cross-lingual test sets exist only
# for directions into English ("-en"); monolingual files exist for every language.
_URLs = {
    "train": [f"./train/{xy}.jsonl" for xy in LPAIRS] + [f"./train/{x}.jsonl" for x in LANGS],
    "validation": [f"./valid/{xy}.jsonl" for xy in LPAIRS] + [f"./valid/{x}.jsonl" for x in LANGS],
    "test": [f"./test/{xy}.jsonl" for xy in LPAIRS if "-en" in xy] + [f"./test/{x}.jsonl" for x in LANGS],
}


class XWikis(datasets.GeneratorBasedBuilder):
    """Cross-lingual and monolingual document-summary pairs derived from Wikipedia."""

    # One config per ordered language pair, plus a monolingual config per language.
    BUILDER_CONFIGS = (
        [datasets.BuilderConfig(name=xy, version=VERSION, description=f"XWikis. Language pair: {xy}") for xy in LPAIRS]
        + [datasets.BuilderConfig(name=x, version=VERSION, description=f"XWikis. Monolingual corpus: {x}") for x in LANGS]
    )

    DEFAULT_CONFIG_NAME = "fr-en"

    def _info(self):
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "gem_parent_id": datasets.Value("string"),
                "id": datasets.Value("string"),
                "src_title": datasets.Value("string"),
                "tgt_title": datasets.Value("string"),
                "src_document": datasets.features.Sequence(
                    {
                        "title": datasets.Value("string"),
                        "section_level": datasets.Value("string"),
                        "content": datasets.Value("string"),
                    }
                ),
                "src_summary": datasets.Value("string"),
                "tgt_summary": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
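
    # Illustrative shape of one example as returned by the loader (values are
    # invented; a Sequence of dicts is exposed as a dict of lists by `datasets`):
    #     {
    #         "gem_id": "fr-en-train-42",
    #         "gem_parent_id": "fr-en-train-42",
    #         "id": "42",
    #         "src_title": "Huile d'olive",
    #         "tgt_title": "Olive oil",
    #         "src_document": {"title": [...], "section_level": [...], "content": [...]},
    #         "src_summary": "...",
    #         "tgt_summary": "...",
    #     }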

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Split directories are the first five characters of the split name:
        # "train" -> train/, "validation" -> valid/, "test" -> test/.
        my_urls = {sp: f"./{sp[:5]}/{self.config.name}.jsonl" for sp in ["train", "validation", "test"]}
        d_conf = dl_manager.download_and_extract(my_urls)

        challenge_sets = []
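        # No GEM challenge sets are defined yet, so the trailing comprehension in
        # the return below yields nothing. If some were added, each entry would
        # pair a split name with a data file (the names here are hypothetical):
        #     challenge_sets = [("challenge_test_xyz", f"./test/{self.config.name}_xyz.jsonl")]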

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": d_conf["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": d_conf["validation"],
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": d_conf["test"],
                    "split": "test",
                },
            ),
        ] + [
            datasets.SplitGenerator(
                name=challenge_split,
                gen_kwargs={
                    "filepath": filename,
                    "split": challenge_split,
                },
            )
            for challenge_split, filename in challenge_sets
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            for row in f:
                data = json.loads(row)
                id_ = data["id"]
                # GEM-style identifiers: <config name>-<split>-<original id>.
                data["gem_parent_id"] = f"{self.config.name}-{split}-{id_}"
                data["gem_id"] = f"{self.config.name}-{split}-{id_}"
                yield id_, data
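
# Sketch: flattening one example's sectioned source document into a single string
# for a seq2seq summariser (an illustrative helper, not part of this loader):
#
#     def flatten_document(example):
#         doc = example["src_document"]
#         sections = [f"{t}\n{c}" for t, c in zip(doc["title"], doc["content"])]
#         return "\n\n".join(sections)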