# coding=utf-8
# Copyright 2021 The HuggingFace Datasets Authors and
# the current dataset script contributor (Mathias Creutz).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loader for the Opusparcus paraphrase corpus."""

import bz2
import csv
import json
import os

import datasets
# BibTeX citation:
_CITATION = """\
@InProceedings{creutz:lrec2018,
  title = {Open Subtitles Paraphrase Corpus for Six Languages},
  author = {Mathias Creutz},
  booktitle = {Proceedings of the 11th edition of the Language Resources
               and Evaluation Conference (LREC 2018)},
  year = {2018},
  month = {May 7-12},
  address = {Miyazaki, Japan},
  editor = {Nicoletta Calzolari (Conference chair) and Khalid Choukri
            and Christopher Cieri and Thierry Declerck and Sara Goggi and Koiti
            Hasida and Hitoshi Isahara and Bente Maegaard and Joseph Mariani and
            Hélène Mazo and Asuncion Moreno and Jan Odijk and Stelios Piperidis
            and Takenobu Tokunaga},
  publisher = {European Language Resources Association (ELRA)},
  isbn = {979-10-95546-00-9},
  language = {english},
  url = {http://www.lrec-conf.org/proceedings/lrec2018/pdf/131.pdf}
}
"""
| _DESCRIPTION = """\ | |
| Opusparcus is a paraphrase corpus for six European languages: German, | |
| English, Finnish, French, Russian, and Swedish. The paraphrases are | |
| extracted from the OpenSubtitles2016 corpus, which contains subtitles | |
| from movies and TV shows. | |
| """ | |
| _HOMEPAGE = "http://urn.fi/urn:nbn:fi:lb-2018021221" | |
| _LICENSE = "CC-BY-NC" | |

# The HuggingFace dataset library doesn't host the datasets but only
# points to the original files. This can be an arbitrary nested
# dict/list of URLs (see below in the `_split_generators` method):
_URLs = {
    "validation": "validation.jsonl",
    "test": "test.jsonl",
    "validation.full": "validation.jsonl",
    "test.full": "test.jsonl",
    # NB: the "train" split file is defined dynamically inside the
    # `_split_generators` method
}
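
# For illustration only: for the "en.95" configuration, `_split_generators`
# below adds the training file for English, so the mapping handed to
# `dl_manager.download_and_extract` is expected to end up roughly as
#     {"validation": "validation.jsonl", "test": "test.jsonl",
#      "validation.full": "validation.jsonl", "test.full": "test.jsonl",
#      "train": "train_en.70.jsonl.bz2"}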

_VERSION = datasets.Version("1.0.0", "")


class OpusparcusConfig(datasets.BuilderConfig):
    """BuilderConfig for Opusparcus."""

    def __init__(self, lang=None, quality=100, **kwargs):
        """BuilderConfig for Opusparcus.

        Args:
          lang: string, two-letter language code:
                de, en, fi, fr, ru, sv
          quality: int, filter the training set according to quality:
                [60, 65, 70, 75, 80, 85, 90, 95, 100]
          **kwargs: keyword arguments forwarded to super.
        """
        super(OpusparcusConfig, self).__init__(
            name="{0}.{1}".format(lang, quality),
            description="Opusparcus datasets for '{:s}', training set quality: {:d}".format(
                lang, quality
            ),
            **kwargs,
        )
        self.lang = lang
        self.quality = quality


# Languages in Opusparcus: German (de), English (en), Finnish (fi),
# French (fr), Russian (ru), Swedish (sv):
LANGS = ["de", "en", "fi", "fr", "ru", "sv"]

# The training sets (train splits) come in eight sizes (95 .. 60),
# where the number indicates the estimated proportion [%] of true
# paraphrases in the set. The higher the number, the smaller (but
# ideally cleaner) the set; the lower the number, the larger (but
# noisier) the set. The smaller sets are included as subsets of the
# larger sets. The special value 100 matches no training data at all,
# so if you are only interested in the validation and test sets, you
# can use the value 100 in order to save time and space. (The quality
# value is irrelevant for the validation and test sets, which have
# been annotated manually, and each example has an annotation score
# attached to it.)
QUALITIES = [100, 95, 90, 85, 80, 75, 70, 65, 60]
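
# For example, if only the manually annotated validation and test sets are
# needed, the quality value 100 can be requested and no training data is
# downloaded (this mirrors the loading examples shown further below):
#
#     data = datasets.load_dataset('GEM/opusparcus', 'en.100')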


class Opusparcus(datasets.GeneratorBasedBuilder):
    """Opusparcus is a paraphrase corpus for six European languages:
    German, English, Finnish, French, Russian, and Swedish. The
    paraphrases are extracted from the OpenSubtitles2016 corpus, which
    contains subtitles from movies and TV shows.

    The data in Opusparcus has been extracted from OpenSubtitles2016
    (http://opus.nlpl.eu/OpenSubtitles2016.php), which is in turn
    based on data from http://www.opensubtitles.org/.

    For each target language, the Opusparcus data have been
    partitioned into three types of data sets: training, validation
    and test sets. The training sets are large, consisting of millions
    of sentence pairs, and have been compiled automatically, with the
    help of probabilistic ranking functions. The validation and test
    sets consist of sentence pairs that have been annotated manually;
    each set contains approximately 1000 sentence pairs that have been
    verified to be acceptable paraphrases by two independent
    annotators.
    """

    # This is a dataset with multiple configurations.
    BUILDER_CONFIG_CLASS = OpusparcusConfig

    # You can load configurations as follows:
    #   data = datasets.load_dataset('GEM/opusparcus', lang='de')
    #   data = datasets.load_dataset('GEM/opusparcus', lang='fr', quality=75)
    #   etc.
    #
    # The language parameter is compulsory, whereas the quality
    # parameter is not (the default value being 100).
    #
    # The above commands can alternatively be expressed as:
    #   data = datasets.load_dataset('GEM/opusparcus', 'de.100')
    #   data = datasets.load_dataset('GEM/opusparcus', 'fr.75')
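    #
    # Once loaded, the splits produced by this builder can be accessed as
    # usual; a minimal sketch (split and field names as defined below in
    # this script):
    #
    #   data = datasets.load_dataset('GEM/opusparcus', 'en.95')
    #   print(data["validation"][0]["sent1"], "<=>", data["validation"][0]["sent2"])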
    BUILDER_CONFIGS = [
        OpusparcusConfig(lang=lang, quality=quality, version=_VERSION)
        for lang in LANGS
        for quality in QUALITIES
    ]

    # There is no default configuration. The user always needs to specify one:
    # DEFAULT_CONFIG_NAME = None

    def _info(self):
        # This method specifies the datasets.DatasetInfo object, which
        # contains the information and feature types for the dataset.
        features = datasets.Features(
            {
                "lang": datasets.Value("string"),
                "sent1": datasets.Value("string"),
                "sent2": datasets.Value("string"),
                "annot_score": datasets.Value("float"),
                "gem_id": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset:
            supervised_keys=("sent1", "sent2"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # This method is tasked with downloading/extracting the data
        # and defining the splits depending on the configuration.
        # Several configurations are possible (listed in
        # BUILDER_CONFIGS), and the configuration selected by the user
        # is in self.config.name, which consists of two fields
        # separated by a period, containing the values of
        # self.config.lang and self.config.quality.
        if self.config.lang is None:
            # This is an error: there is nothing to do here if no
            # language has been defined:
            return []

        # Select the training data file that matches the desired quality:
        if self.config.quality < 70:
            # We need to retrieve the largest training set file,
            # containing the full training set for the desired language.
            _URLs["train"] = "train_{0}.60.jsonl.bz2".format(self.config.lang)
        elif self.config.quality <= 95:
            # We can do with a smaller version of the training set
            # for the desired language.
            _URLs["train"] = "train_{0}.70.jsonl.bz2".format(self.config.lang)
        # Otherwise, if the desired quality is above 95, we do not
        # download any training data, because there is no matching data.
        # The validation and test sets are so small that we do not perform
        # any filtering or optimization at this stage.

        # dl_manager is a datasets.download.DownloadManager, which
        # downloads and extracts the URLs.
        # (It can accept any type of nested list/dict and will give
        # back the same structure with the URLs replaced by paths to
        # local files. By default the archives will be extracted and
        # a path to a cached folder where they are extracted is
        # returned instead of the archive.)
        data_dir = dl_manager.download_and_extract(_URLs)
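
        # For illustration only: data_dir is a dict with the same keys as
        # _URLs, each value now pointing to the locally cached copy of the
        # corresponding file (exact paths depend on the cache location), e.g.
        #     {"validation": ".../validation.jsonl", "test": ".../test.jsonl", ...}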

        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "lang": self.config.lang,
                    "quality": 100,
                    "filepath": data_dir["test"],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "lang": self.config.lang,
                    "quality": 100,
                    "filepath": data_dir["validation"],
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name="test.full",
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "lang": self.config.lang,
                    "quality": 100,
                    "filepath": data_dir["test.full"],
                    "split": "test.full",
                },
            ),
            datasets.SplitGenerator(
                name="validation.full",
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "lang": self.config.lang,
                    "quality": 100,
                    "filepath": data_dir["validation.full"],
                    "split": "validation.full",
                },
            ),
        ]

        # If the desired quality value is 100, no subset of the training
        # set is good enough, and we only produce the validation and test
        # sets, in order to save space and time. Otherwise (quality <= 95)
        # there is matching training data, and we also produce a train split:
        if self.config.quality <= 95:
            splits.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "lang": self.config.lang,
                        "quality": self.config.quality,
                        "filepath": data_dir["train"],
                        "split": "train",
                    },
                )
            )
        return splits

    def _generate_examples(
        self, lang, quality, filepath, split
        # method parameters are unpacked from `gen_kwargs` as given in
        # `_split_generators`
    ):
        """Yields examples as (key, example) tuples."""
        # This method handles the input defined in _split_generators to
        # yield (key, example) tuples from the dataset.
        # The `key` is here for legacy reasons (tfds) and is not important
        # in itself.
        if split == datasets.Split.TRAIN:
            # The training sets are jsonl files that have been compressed
            # using bzip2. They contain a field "quality" that is missing
            # from the validation and test sets. We also know that this
            # file only contains the desired language, because the training
            # sets come in separate files per language, and only the file
            # for the desired language has been downloaded.
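            # For illustration only, a single (hypothetical) training line
            # carries the fields read below, roughly along these lines:
            #     {"lang": "en", "sent1": "...", "sent2": "...",
            #      "quality": 78.5, "gem_id": "..."}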
            with bz2.open(filepath, "rt", encoding="utf-8") as f:
                for id_, row in enumerate(f):
                    data = json.loads(row)
                    if data["quality"] < quality:
                        # The rest of this file contains too low quality data,
                        # because the data is sorted best first.
                        break
                    yield id_, {
                        "lang": data["lang"],
                        "sent1": data["sent1"],
                        "sent2": data["sent2"],
                        "annot_score": 0.0,  # means there is no annotation
                        "gem_id": data["gem_id"],
                    }
        else:
            # The validation and test sets are in jsonl files. They
            # contain the fields "lang" and "annot_score" that we filter
            # on. If we ask for the full sets, we keep all data entries
            # for the desired language, also the sentence pairs that were
            # not considered paraphrases by the annotators:
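            # For illustration only, a single (hypothetical) validation or
            # test line carries the fields read below, roughly:
            #     {"lang": "en", "sent1": "...", "sent2": "...",
            #      "annot_score": 4.0, "gem_id": "..."}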
            keep_all = (split == "validation.full" or split == "test.full")
            with open(filepath, encoding="utf-8") as f:
                for id_, row in enumerate(f):
                    data = json.loads(row)
                    if data["lang"] == lang:  # only keep the desired language
                        if keep_all or data["annot_score"] >= 3.0:
                            # For the full sets we keep everything; for the
                            # standard test and validation sets, we keep only
                            # the actual paraphrases (annot_score >= 3.0 means
                            # "good or mostly good example of paraphrases").
                            yield id_, {
                                "lang": data["lang"],
                                "sent1": data["sent1"],
                                "sent2": data["sent2"],
                                "annot_score": data["annot_score"],
                                "gem_id": data["gem_id"],
                            }
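

# A minimal smoke test, left here as a sketch only: it assumes this file is
# used as a local loading script (e.g. saved as "opusparcus.py" next to the
# data files listed in _URLs) rather than loaded from the Hub.
if __name__ == "__main__":
    # Load the English configuration with the cleanest non-empty training
    # set and print the number of rows in each produced split.
    data = datasets.load_dataset(__file__, lang="en", quality=95)
    for split_name, split in data.items():
        print(split_name, split.num_rows)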