| import json |
| from typing import List |
|
|
| import datasets |
|
|
|
|
| _DESCRIPTION = "Russian dataset for detection and substraction of anglicisms." |
| _URLS = { |
| "train": "data/train.jsonl", |
| "test": "data/test.jsonl" |
| } |
| _LICENSE = "apache-2.0" |
|
|
|
|
class RuAnglDataset(datasets.GeneratorBasedBuilder):
    """Russian anglicism dataset.

    Each example pairs a sentence containing an anglicism with a paraphrase
    in which the anglicism is replaced, along with the anglicism's lemma
    (``word``) and its surface form in the sentence (``form``).
    Data is read from JSONL files listed in the module-level ``_URLS``.
    """

    VERSION = datasets.Version("0.2.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="default", version=VERSION, description=""),
    ]

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` describing this dataset.

        Declares the four string features and the supervised mapping
        ``sentence -> paraphrase``.
        """
        features = datasets.Features(
            {
                "word": datasets.Value("string"),       # anglicism lemma
                "form": datasets.Value("string"),       # surface form as it appears in the sentence
                "sentence": datasets.Value("string"),   # original sentence containing the anglicism
                "paraphrase": datasets.Value("string"), # sentence with the anglicism replaced
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("sentence", "paraphrase"),
            # Fix: _LICENSE was defined at module level but never attached
            # to the DatasetInfo, so the license metadata was lost.
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the data files and define the train/test splits.

        Parameters
        ----------
        dl_manager : datasets.DownloadManager
            Used to resolve/download the paths declared in ``_URLS``.
        """
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(id, example)`` pairs from a JSONL file.

        Each non-empty line is parsed as one JSON object; the line number
        serves as the example id. Blank lines (e.g. a trailing newline at
        end of file) are skipped instead of raising ``JSONDecodeError``.
        """
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                # Robustness fix: a blank or whitespace-only line (common as
                # a trailing newline in JSONL files) previously crashed
                # json.loads; skip it instead.
                if not row.strip():
                    continue
                data = json.loads(row)
                yield id_, data