| |
|
|
| import conllu |
|
|
| import datasets |
|
|
|
|
# BibTeX citation block; injected by the template engine when the script is rendered.
_CITATION = r"""\
{{ citation }}
"""

# Human-readable dataset description; injected by the template engine.
_DESCRIPTION = """\
{{ description }}
"""

# One configuration name per UD treebank, sorted by the treebank's directory name.
_NAMES = [
{%- for name,metadata in data.items()|sort(attribute='1.dirname') %}
    "{{ name }}",
{%- endfor %}
]

# Per-treebank summary text, keyed by configuration name.
_DESCRIPTIONS = {
{%- for name,metadata in data.items()|sort(attribute='1.dirname') %}
    "{{ name }}": """{{ metadata.summary }}""",
{%- endfor %}
}

# Per-treebank license identifier, keyed by configuration name.
_LICENSES = {
{%- for name,metadata in data.items()|sort(attribute='1.dirname') %}
    "{{ name }}": "{{ metadata.license }}",
{%- endfor %}
}

# Raw-content URL prefix for files hosted in the UniversalDependencies GitHub org.
_PREFIX = "https://raw.githubusercontent.com/UniversalDependencies/"
# Mapping: config name -> split name -> repo-relative CoNLL-U file path(s).
# NOTE(review): `fileset_split_data.files` is rendered verbatim — presumably a
# Python list or string literal; confirm against the template's data source.
_UD_DATASETS = {
{%- for name,metadata in data.items()|sort(attribute='1.dirname') %}
    "{{ name }}": {
{%- for fileset_split_name,fileset_split_data in metadata.splits.items() %}
        "{{ fileset_split_name }}": {{ fileset_split_data.files }},
{%- endfor %}
    },
{%- endfor %}
}
|
|
|
|
class UniversaldependenciesConfig(datasets.BuilderConfig):
    """BuilderConfig for one Universal Dependencies treebank.

    Args:
        data_url: URL of the treebank's GitHub repository.
        **kwargs: forwarded unchanged to ``datasets.BuilderConfig``.
    """

    def __init__(self, data_url, **kwargs):
        # The version string is filled in by the template engine at render time.
        super().__init__(version=datasets.Version("{{ ud_ver }}.0", ""), **kwargs)
        self.data_url = data_url
|
|
|
|
class UniversalDependencies(datasets.GeneratorBasedBuilder):
    """Dataset builder exposing each Universal Dependencies treebank as a config."""

    VERSION = datasets.Version("{{ ud_ver }}.0")
    BUILDER_CONFIGS = [
        UniversaldependenciesConfig(
            # The repository name is the first path component of any data file.
            # NOTE(review): assumes every treebank declares a "test" split — holds
            # for UD releases; verify if the template's data source changes.
            data_url="https://github.com/UniversalDependencies/" + _UD_DATASETS[name]["test"][0].split("/")[0],
            name=name,
            description=_DESCRIPTIONS[name],
        )
        for name in _NAMES
    ]
    BUILDER_CONFIG_CLASS = UniversaldependenciesConfig

    def _info(self):
        """Return dataset metadata: one feature per CoNLL-U column plus sentence id/text."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "idx": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "lemmas": datasets.Sequence(datasets.Value("string")),
                    # UPOS is the closed universal tagset, so it can be a ClassLabel;
                    # "_" covers unannotated tokens (e.g. multi-word token lines).
                    "upos": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "NOUN",
                                "PUNCT",
                                "ADP",
                                "NUM",
                                "SYM",
                                "SCONJ",
                                "ADJ",
                                "PART",
                                "DET",
                                "CCONJ",
                                "PROPN",
                                "PRON",
                                "X",
                                "_",
                                "ADV",
                                "INTJ",
                                "VERB",
                                "AUX",
                            ]
                        )
                    ),
                    # Remaining CoNLL-U columns are treebank-specific, kept as strings.
                    "xpos": datasets.Sequence(datasets.Value("string")),
                    "feats": datasets.Sequence(datasets.Value("string")),
                    "head": datasets.Sequence(datasets.Value("string")),
                    "deprel": datasets.Sequence(datasets.Value("string")),
                    "deps": datasets.Sequence(datasets.Value("string")),
                    "misc": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage="https://universaldependencies.org/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download this config's CoNLL-U files and return one SplitGenerator per split."""
        urls_to_download = {}
        for split, address in _UD_DATASETS[self.config.name].items():
            # A split may reference a single file or a list of files
            # (some treebanks ship multi-part splits); normalize to a list.
            addresses = address if isinstance(address, list) else [address]
            urls_to_download[split] = [_PREFIX + add for add in addresses]

        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        splits = []

        # Not every treebank has all three splits, so add each one conditionally.
        if "train" in downloaded_files:
            splits.append(
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]})
            )

        if "dev" in downloaded_files:
            splits.append(
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}
                )
            )

        if "test" in downloaded_files:
            splits.append(
                datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]})
            )

        return splits

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from one or more CoNLL-U files.

        Args:
            filepath: list of local paths to downloaded ``.conllu`` files.

        Yields:
            Tuples of a monotonically increasing integer key and a dict of
            string/sequence features matching ``_info``.
        """
        row_id = 0  # renamed from `id`, which shadowed the builtin
        for path in filepath:
            with open(path, "r", encoding="utf-8") as data_file:
                tokenlist = list(conllu.parse_incr(data_file))
            for sent in tokenlist:
                # Prefer the treebank's own sentence id; fall back to the counter.
                if "sent_id" in sent.metadata:
                    idx = sent.metadata["sent_id"]
                else:
                    idx = row_id

                tokens = [token["form"] for token in sent]

                # Reconstruct the raw text if the treebank omits it.
                if "text" in sent.metadata:
                    txt = sent.metadata["text"]
                else:
                    txt = " ".join(tokens)

                yield row_id, {
                    "idx": str(idx),
                    "text": txt,
                    "tokens": tokens,  # reuse instead of recomputing the comprehension
                    "lemmas": [token["lemma"] for token in sent],
                    "upos": [token["upos"] for token in sent],
                    "xpos": [token["xpos"] for token in sent],
                    "feats": [str(token["feats"]) for token in sent],
                    "head": [str(token["head"]) for token in sent],
                    "deprel": [str(token["deprel"]) for token in sent],
                    "deps": [str(token["deps"]) for token in sent],
                    "misc": [str(token["misc"]) for token in sent],
                }
                row_id += 1
|
|