qsimeon committed
Commit 8396149 · verified · 1 Parent(s): eb7dd5b

Upload 2 files


Template files for dataset script and config

Files changed (2)
  1. datasetcard.md +113 -0
  2. new_dataset_script.py +172 -0
datasetcard.md ADDED
@@ -0,0 +1,113 @@
+ ---
+ # Example metadata to be added to a dataset card.
+ # Full dataset card template at https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md
+ language:
+ - {lang_0} # Example: fr
+ - {lang_1} # Example: en
+ license: {license} # Example: apache-2.0 or any license from https://hf.co/docs/hub/repositories-licenses
+ license_name: {license_name} # If license = other (license not in https://hf.co/docs/hub/repositories-licenses), specify an id for it here, like `my-license-1.0`.
+ license_link: {license_link} # If license = other, specify "LICENSE" or "LICENSE.md" to link to a file of that name inside the repo, or a URL to a remote file.
+ license_details: {license_details} # Legacy, textual description of a custom license.
+ tags:
+ - {tag_0} # Example: audio
+ - {tag_1} # Example: bio
+ - {tag_2} # Example: natural-language-understanding
+ - {tag_3} # Example: birds-classification
+ annotations_creators:
+ - {creator} # Example: crowdsourced, found, expert-generated, machine-generated
+ language_creators:
+ - {creator} # Example: crowdsourced, ...
+ language_details:
+ - {bcp47_lang_0} # Example: fr-FR
+ - {bcp47_lang_1} # Example: en-US
+ pretty_name: {pretty_name} # Example: SQuAD
+ size_categories:
+ - {number_of_elements_in_dataset} # Example: n<1K, 100K<n<1M, …
+ source_datasets:
+ - {source_dataset_0} # Example: wikipedia
+ - {source_dataset_1} # Example: laion/laion-2b
+ task_categories: # Full list at https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/pipelines.ts
+ - {task_0} # Example: question-answering
+ - {task_1} # Example: image-classification
+ task_ids:
+ - {subtask_0} # Example: extractive-qa
+ - {subtask_1} # Example: multi-class-image-classification
+ paperswithcode_id: {paperswithcode_id} # Dataset id on PapersWithCode (from the URL). Example for SQuAD: squad
+ configs: # Optional for datasets with multiple configurations like glue.
+ - {config_0} # Example for glue: sst2
+ - {config_1} # Example for glue: cola
+
+ # Optional. This part can be used to store the feature types and size of the dataset to be used in Python. It can be generated automatically using the datasets-cli.
+ dataset_info:
+   features:
+     - name: {feature_name_0} # Example: id
+       dtype: {feature_dtype_0} # Example: int32
+     - name: {feature_name_1} # Example: text
+       dtype: {feature_dtype_1} # Example: string
+     - name: {feature_name_2} # Example: image
+       dtype: {feature_dtype_2} # Example: image
+   # Example for SQuAD:
+   # - name: id
+   #   dtype: string
+   # - name: title
+   #   dtype: string
+   # - name: context
+   #   dtype: string
+   # - name: question
+   #   dtype: string
+   # - name: answers
+   #   sequence:
+   #     - name: text
+   #       dtype: string
+   #     - name: answer_start
+   #       dtype: int32
+   config_name: {config_name} # Example for glue: sst2
+   splits:
+     - name: {split_name_0} # Example: train
+       num_bytes: {split_num_bytes_0} # Example for SQuAD: 79317110
+       num_examples: {split_num_examples_0} # Example for SQuAD: 87599
+   download_size: {dataset_download_size} # Example for SQuAD: 35142551
+   dataset_size: {dataset_size} # Example for SQuAD: 89789763
+
+ # It can also be a list of multiple configurations:
+ # ```yaml
+ # dataset_info:
+ #   - config_name: {config0}
+ #     features:
+ #       ...
+ #   - config_name: {config1}
+ #     features:
+ #       ...
+ # ```
+
+ # Optional. Add this if you want your dataset to be protected behind a gate that users have to accept before accessing the dataset. More info at https://huggingface.co/docs/hub/datasets-gated
+ extra_gated_fields:
+ - {field_name_0}: {field_type_0} # Example: Name: text
+ - {field_name_1}: {field_type_1} # Example: Affiliation: text
+ - {field_name_2}: {field_type_2} # Example: Email: text
+ - {field_name_3}: {field_type_3} # Example for speech datasets: I agree to not attempt to determine the identity of speakers in this dataset: checkbox
+ extra_gated_prompt: {extra_gated_prompt} # Example for speech datasets: By clicking on “Access repository” below, you also agree to not attempt to determine the identity of speakers in the dataset.
+
+ # Optional. Add this if you want to encode train and evaluation info in a structured way for AutoTrain or Evaluation on the Hub.
+ train-eval-index:
+   - config: {config_name} # The dataset config name to use. Example for datasets without configs: default. Example for glue: sst2
+     task: {task_name} # The task category name (same as task_category). Example: question-answering
+     task_id: {task_type} # The AutoTrain task id. Example: extractive_question_answering
+     splits:
+       train_split: train # The split to use for training. Example: train
+       eval_split: validation # The split to use for evaluation. Example: test
+     col_mapping: # The columns mapping needed to configure the task_id.
+       # Example for extractive_question_answering:
+       # question: question
+       # context: context
+       # answers:
+       #   text: text
+       #   answer_start: answer_start
+     metrics:
+       - type: {metric_type} # The metric id. Example: wer. Use metric id from https://hf.co/metrics
+         name: {metric_name} # The metric name to be displayed. Example: Test WER
+ ---
+
+ Valid license identifiers can be found in [our docs](https://huggingface.co/docs/hub/repositories-licenses).
+
+ For the full dataset card template, see: [datasetcard_template.md file](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md).
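The YAML placeholders above can also be generated programmatically instead of being filled in by hand. Below is a minimal sketch using the `DatasetCard` / `DatasetCardData` helpers from `huggingface_hub`; the field values and repo id are hypothetical examples, not part of this commit:

```python
# Minimal sketch (not part of the uploaded files): build the card metadata in Python.
from huggingface_hub import DatasetCard, DatasetCardData

card_data = DatasetCardData(
    language=["en"],                       # hypothetical example values
    license="apache-2.0",
    pretty_name="My Great Dataset",
    size_categories=["n<1K"],
    task_categories=["question-answering"],
    tags=["bio"],
)

# Render the YAML front matter on top of the default dataset card template.
card = DatasetCard.from_template(card_data)
card.save("datasetcard.md")
# card.push_to_hub("username/my-great-dataset")  # hypothetical repo id
```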
new_dataset_script.py ADDED
@@ -0,0 +1,172 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # TODO: Address all TODOs and remove all explanatory comments
+ """TODO: Add a description here."""
+
+
+ import csv
+ import json
+ import os
+
+ import datasets
+
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.
+ },
+ year={2020}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = {
+     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
+     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
+ }
+
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class NewDataset(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options,
+     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configuration in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
+         datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+     def _info(self):
+         # TODO: This method specifies the datasets.DatasetInfo object which contains the information and typings for the dataset
+         if self.config.name == "first_domain":  # This is the name of the configuration selected in BUILDER_CONFIGS above
+             features = datasets.Features(
+                 {
+                     "sentence": datasets.Value("string"),
+                     "option1": datasets.Value("string"),
+                     "answer": datasets.Value("string")
+                     # These are the features of your dataset like images, labels ...
+                 }
+             )
+         else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
+             features = datasets.Features(
+                 {
+                     "sentence": datasets.Value("string"),
+                     "option2": datasets.Value("string"),
+                     "second_domain_answer": datasets.Value("string")
+                     # These are the features of your dataset like images, labels ...
+                 }
+             )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,  # Here we define them above because they are different between the two configurations
+             # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
+         # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+         urls = _URLS[self.config.name]
+         data_dir = dl_manager.download_and_extract(urls)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "train.jsonl"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "dev.jsonl"),
+                     "split": "dev",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "test.jsonl"),
+                     "split": "test"
+                 },
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath, split):
+         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+         with open(filepath, encoding="utf-8") as f:
+             for key, row in enumerate(f):
+                 data = json.loads(row)
+                 if self.config.name == "first_domain":
+                     # Yields examples as (key, example) tuples
+                     yield key, {
+                         "sentence": data["sentence"],
+                         "option1": data["option1"],
+                         "answer": "" if split == "test" else data["answer"],
+                     }
+                 else:
+                     yield key, {
+                         "sentence": data["sentence"],
+                         "option2": data["option2"],
+                         "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
+                     }
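Once the TODOs are resolved and the `_URLS` entries point at real archives, the loader can be exercised end to end. A minimal usage sketch, assuming the script is saved locally as `new_dataset_script.py` (recent `datasets` releases may additionally require `trust_remote_code=True` for script-based datasets):

```python
# Minimal sketch (not part of the uploaded files): load one template config locally.
from datasets import load_dataset

# "first_domain" is one of the two configs declared in BUILDER_CONFIGS above.
data = load_dataset("new_dataset_script.py", "first_domain")
print(data["train"][0])  # e.g. {"sentence": ..., "option1": ..., "answer": ...}
```

The `dataset_info` block referenced in the card above can then be regenerated with the datasets-cli (e.g. `datasets-cli test new_dataset_script.py --save_info --all_configs`; exact flag names depend on the installed `datasets` version), as the card's own comment notes.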