iiegn committed on
Commit
6b815a0
·
verified ·
1 Parent(s): 285c5ff

Add tools/templates/

Browse files
tools/templates/README.tmpl ADDED
@@ -0,0 +1,227 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### THIS IS A GENERATED FILE.
2
+ ---
3
+ annotations_creators:
4
+ - expert-generated
5
+
6
+ language_creators:
7
+ - crowdsourced
8
+
9
+ language:
10
+ {%- set languages = [] -%}
11
+ {%- for dirname,metadata in data.items()|sort(attribute='0') -%}
12
+ {{ languages.append(metadata.lcode)|default("", True)}}
13
+ {%- endfor -%}
14
+ {%- for language in languages|unique %}
15
+ - {{ language }}
16
+ {%- endfor %}
17
+
18
+ license:
19
+ - unknown
20
+
21
+ multilinguality:
22
+ - multilingual
23
+
24
+ size_categories:
25
+ - '1K<n<10K'
26
+
27
+ source_datasets:
28
+ - original
29
+
30
+ task_categories:
31
+ - token-classification
32
+
33
+ task_ids:
34
+ - parsing
35
+
36
+ paperswithcode_id: universal-dependencies
37
+ pretty_name: Universal Dependencies Treebank
38
+
39
+ tags:
40
+ - constituency-parsing
41
+ - dependency-parsing
42
+
43
+ dataset_info:
44
+ {%- for dirname,metadata in data.items()|sort(attribute='0') %}
45
+ - config_name: {{ metadata.name }}
46
+ features:
47
+ - name: idx
48
+ dtype: string
49
+ - name: text
50
+ dtype: string
51
+ - name: tokens
52
+ sequence: string
53
+ - name: lemmas
54
+ sequence: string
55
+ - name: upos
56
+ sequence:
57
+ class_label:
58
+ names:
59
+ '0': NOUN
60
+ '1': PUNCT
61
+ '2': ADP
62
+ '3': NUM
63
+ '4': SYM
64
+ '5': SCONJ
65
+ '6': ADJ
66
+ '7': PART
67
+ '8': DET
68
+ '9': CCONJ
69
+ '10': PROPN
70
+ '11': PRON
71
+ '12': X
72
+ '13': _
73
+ '14': ADV
74
+ '15': INTJ
75
+ '16': VERB
76
+ '17': AUX
77
+ - name: xpos
78
+ sequence: string
79
+ - name: feats
80
+ sequence: string
81
+ - name: head
82
+ sequence: string
83
+ - name: deprel
84
+ sequence: string
85
+ - name: deps
86
+ sequence: string
87
+ - name: misc
88
+ sequence: string
89
+ splits:
90
+ {%- set ns = namespace(dataset_size=0) -%}
91
+ {%- for fileset_split_name,fileset_split_data in metadata.splits.items() %}
92
+ - name: {{ fileset_split_name }}
93
+ num_bytes: {{ fileset_split_data.num_bytes }}{%- set ns.dataset_size = ns.dataset_size + fileset_split_data.num_bytes %}
94
+ num_examples: {{ fileset_split_data.num_sentences }}
95
+ num_sentences: {{ fileset_split_data.num_sentences }}
96
+ num_tokens: {{ fileset_split_data.num_tokens }}
97
+ num_words: {{ fileset_split_data.num_words }}
98
+ {%- endfor %}
99
+ dataset_size: {{ ns.dataset_size }}
100
+ {%- endfor %}
101
+
102
+ config_names:
103
+ {%- for dirname,metadata in data.items()|sort(attribute='0') %}
104
+ - {{ metadata.name }}
105
+ {%- endfor %}
106
+ ---
107
+
108
+ # Dataset Card for Universal Dependencies Treebank
109
+
110
+ ## Table of Contents
111
+ - [Dataset Description](#dataset-description)
112
+ - [Dataset Summary](#dataset-summary)
113
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
114
+ - [Languages](#languages)
115
+ - [Dataset Structure](#dataset-structure)
116
+ - [Data Instances](#data-instances)
117
+ - [Data Fields](#data-fields)
118
+ - [Data Splits](#data-splits)
119
+ - [Dataset Creation](#dataset-creation)
120
+ - [Curation Rationale](#curation-rationale)
121
+ - [Source Data](#source-data)
122
+ - [Annotations](#annotations)
123
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
124
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
125
+ - [Social Impact of Dataset](#social-impact-of-dataset)
126
+ - [Discussion of Biases](#discussion-of-biases)
127
+ - [Other Known Limitations](#other-known-limitations)
128
+ - [Additional Information](#additional-information)
129
+ - [Dataset Curators](#dataset-curators)
130
+ - [Licensing Information](#licensing-information)
131
+ - [Citation Information](#citation-information)
132
+ - [Contributions](#contributions)
133
+
134
+ ## Dataset Description
135
+
136
+ - **Homepage:** [Universal Dependencies](https://universaldependencies.org/)
137
+ - **Repository:**
138
+ - **Paper:**
139
+ - **Leaderboard:**
140
+ - **Point of Contact:**
141
+
142
+ ### Dataset Summary
143
+
144
+ [More Information Needed]
145
+
146
+ ### Supported Tasks and Leaderboards
147
+
148
+ [More Information Needed]
149
+
150
+ ### Languages
151
+
152
+ [More Information Needed]
153
+
154
+ ## Dataset Structure
155
+
156
+ ### Data Instances
157
+
158
+ [More Information Needed]
159
+
160
+ ### Data Fields
161
+
162
+ [More Information Needed]
163
+
164
+ ### Data Splits
165
+
166
+ [More Information Needed]
167
+
168
+ ## Dataset Creation
169
+
170
+ ### Curation Rationale
171
+
172
+ [More Information Needed]
173
+
174
+ ### Source Data
175
+
176
+ #### Initial Data Collection and Normalization
177
+
178
+ [More Information Needed]
179
+
180
+ #### Who are the source language producers?
181
+
182
+ [More Information Needed]
183
+
184
+ ### Annotations
185
+
186
+ #### Annotation process
187
+
188
+ [More Information Needed]
189
+
190
+ #### Who are the annotators?
191
+
192
+ [More Information Needed]
193
+
194
+ ### Personal and Sensitive Information
195
+
196
+ [More Information Needed]
197
+
198
+ ## Considerations for Using the Data
199
+
200
+ ### Social Impact of Dataset
201
+
202
+ [More Information Needed]
203
+
204
+ ### Discussion of Biases
205
+
206
+ [More Information Needed]
207
+
208
+ ### Other Known Limitations
209
+
210
+ [More Information Needed]
211
+
212
+ ## Additional Information
213
+
214
+ ### Dataset Curators
215
+
216
+ [More Information Needed]
217
+
218
+ ### Licensing Information
219
+
220
+ [More Information Needed]
221
+
222
+ ### Citation Information
223
+
224
+ [More Information Needed]
225
+ ### Contributions
226
+
227
+ Thanks to [@patrickvonplaten](https://github.com/patrickvonplaten), [@jplu](https://github.com/jplu) for adding this dataset.
tools/templates/universal_dependencies.tmpl ADDED
@@ -0,0 +1,179 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
### THIS IS A GENERATED FILE.
# Jinja2 template that renders the universal_dependencies loading script.
# Template context: `data` maps treebank directory names to metadata
# (name, summary, license, splits), plus `citation`, `description`, `ud_ver`.

import conllu

import datasets


# Raw string: BibTeX citations routinely contain backslashes (W605 silenced).
_CITATION = r"""\
{{ citation }}
""" # noqa: W605

_DESCRIPTION = """\
{{ description }}
"""

# One builder-config name per treebank, sorted by directory name.
_NAMES = [
{%- for dirname,metadata in data.items()|sort(attribute='0') %}
    "{{ metadata.name }}",
{%- endfor %}
]

# Per-config human-readable summaries.
_DESCRIPTIONS = {
{%- for dirname,metadata in data.items()|sort(attribute='0') %}
    "{{ metadata.name }}": """{{ metadata.summary }}""",
{%- endfor %}
}

# Per-config license identifiers.
_LICENSES = {
{%- for dirname,metadata in data.items()|sort(attribute='0') %}
    "{{ metadata.name }}": "{{ metadata.license }}",
{%- endfor %}
}

# CoNLL-U files are fetched raw from the UniversalDependencies GitHub org.
_PREFIX = "https://raw.githubusercontent.com/UniversalDependencies/"
# Maps config name -> split name -> repo-relative .conllu file path(s).
_UD_DATASETS = {
{%- for dirname,metadata in data.items()|sort(attribute='0') %}
    "{{ metadata.name }}": {
{%- for fileset_split_name,fileset_split_data in metadata.splits.items() %}
        "{{ fileset_split_name }}": {{ fileset_split_data.files }},
{%- endfor %}
    },
{%- endfor %}
}
44
+
45
+
46
class UniversaldependenciesConfig(datasets.BuilderConfig):
    """Configuration for one Universal Dependencies treebank.

    Attributes:
        data_url: URL of the GitHub repository hosting this treebank.
    """

    def __init__(self, data_url, **kwargs):
        # Every config is pinned to the Universal Dependencies release version.
        super().__init__(version=datasets.Version("{{ ud_ver }}.0", ""), **kwargs)
        self.data_url = data_url
54
+
55
+
56
class UniversalDependencies(datasets.GeneratorBasedBuilder):
    """Builder for the Universal Dependencies treebanks (CoNLL-U format).

    One builder config per treebank; every config shares the feature schema
    declared in ``_info``.
    """

    VERSION = datasets.Version("{{ ud_ver }}.0")
    BUILDER_CONFIGS = [
        UniversaldependenciesConfig(
            name=name,
            description=_DESCRIPTIONS[name],
            license=_LICENSES[name],
            # NOTE(review): assumes every treebank has a "test" entry and that
            # its first path component is the repository name — confirm against
            # the generated _UD_DATASETS (the value may be a list of files).
            data_url="https://github.com/UniversalDependencies/" + _UD_DATASETS[name]["test"].split("/")[0],
        )
        for name in _NAMES
    ]
    BUILDER_CONFIG_CLASS = UniversaldependenciesConfig

    def _info(self):
        """Return the DatasetInfo with the CoNLL-U feature schema.

        All columns are strings except ``upos``, whose tag inventory is a
        closed set and therefore modeled as a ClassLabel.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "idx": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "lemmas": datasets.Sequence(datasets.Value("string")),
                    "upos": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "NOUN",
                                "PUNCT",
                                "ADP",
                                "NUM",
                                "SYM",
                                "SCONJ",
                                "ADJ",
                                "PART",
                                "DET",
                                "CCONJ",
                                "PROPN",
                                "PRON",
                                "X",
                                "_",
                                "ADV",
                                "INTJ",
                                "VERB",
                                "AUX",
                            ]
                        )
                    ),
                    "xpos": datasets.Sequence(datasets.Value("string")),
                    "feats": datasets.Sequence(datasets.Value("string")),
                    "head": datasets.Sequence(datasets.Value("string")),
                    "deprel": datasets.Sequence(datasets.Value("string")),
                    "deps": datasets.Sequence(datasets.Value("string")),
                    "misc": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage="https://universaldependencies.org/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download this config's CoNLL-U files and return SplitGenerators.

        Only the splits actually present in ``_UD_DATASETS`` for the current
        config are emitted (train / dev / test may each be absent).
        """
        urls_to_download = {}
        for split, address in _UD_DATASETS[self.config.name].items():
            # `address` may be a single repo-relative path or a list of them;
            # normalize to a list of absolute raw-content URLs.
            urls_to_download[split] = []
            if isinstance(address, list):
                for add in address:
                    urls_to_download[split].append(_PREFIX + add)
            else:
                urls_to_download[split].append(_PREFIX + address)

        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        splits = []

        if "train" in downloaded_files:
            splits.append(
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]})
            )

        if "dev" in downloaded_files:
            splits.append(
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}
                )
            )

        if "test" in downloaded_files:
            splits.append(
                datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]})
            )

        return splits

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from a list of local CoNLL-U files.

        Args:
            filepath: iterable of local paths to downloaded .conllu files.
        """
        # Renamed from `id` so the builtin is not shadowed.
        example_id = 0
        for path in filepath:
            with open(path, "r", encoding="utf-8") as data_file:
                tokenlist = list(conllu.parse_incr(data_file))
                for sent in tokenlist:
                    # Prefer the treebank's own sentence id; fall back to the
                    # running example counter.
                    if "sent_id" in sent.metadata:
                        idx = sent.metadata["sent_id"]
                    else:
                        idx = example_id

                    tokens = [token["form"] for token in sent]

                    # Use the raw sentence text when present; otherwise join
                    # the surface forms as an approximation.
                    if "text" in sent.metadata:
                        txt = sent.metadata["text"]
                    else:
                        txt = " ".join(tokens)

                    yield example_id, {
                        "idx": str(idx),
                        "text": txt,
                        # Reuse the list computed above instead of rebuilding it.
                        "tokens": tokens,
                        "lemmas": [token["lemma"] for token in sent],
                        "upos": [token["upos"] for token in sent],
                        "xpos": [token["xpos"] for token in sent],
                        "feats": [str(token["feats"]) for token in sent],
                        "head": [str(token["head"]) for token in sent],
                        "deprel": [str(token["deprel"]) for token in sent],
                        "deps": [str(token["deps"]) for token in sent],
                        "misc": [str(token["misc"]) for token in sent],
                    }
                    example_id += 1