Datasets:
Commit
·
03e54b8
1
Parent(s):
ac09efd
Upload datasetcard.md
Browse files- datasetcard.md +112 -0
datasetcard.md
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
# Full dataset card template at https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md
|
| 3 |
+
language:
|
| 4 |
+
- {en} # Example: fr
|
| 5 |
+
- {fr} # Example: en
|
| 6 |
+
license: {mit} # Example: apache-2.0 or any license from https://hf.co/docs/hub/repositories-licenses
|
| 7 |
+
license_name: {MIT} # If license = other (license not in https://hf.co/docs/hub/repositories-licenses), specify an id for it here, like `my-license-1.0`.
|
| 8 |
+
license_link: {license_link} # If license = other, specify "LICENSE" or "LICENSE.md" to link to a file of that name inside the repo, or a URL to a remote file.
|
| 9 |
+
license_details: {license_details} # Legacy, textual description of a custom license.
|
| 10 |
+
tags:
|
| 11 |
+
- {nlp-mt} # Example: audio
|
| 12 |
+
- {en-us_fr-ca} # Example: bio
|
| 13 |
+
- {machine-translation} # Example: natural-language-understanding
|
| 14 |
+
- {dialect-analysis} # Example: birds-classification
|
| 15 |
+
annotations_creators:
|
| 16 |
+
- {creator} # Example: crowdsourced, found, expert-generated, machine-generated
|
| 17 |
+
language_creators:
|
| 18 |
+
- {creator} # Example: crowdsourced, ...
|
| 19 |
+
language_details:
|
| 20 |
+
- {fr-CA} # Example: fr-FR
|
| 21 |
+
- {en-US} # Example: en-US
|
| 22 |
+
pretty_name: {Hansard} # Example: SQuAD
|
| 23 |
+
size_categories:
|
| 24 |
+
- {n<360K} # Example: n<1K, 100K<n<1M, …
|
| 25 |
+
source_datasets:
|
| 26 |
+
- {source_dataset_0} # Example: wikipedia
|
| 27 |
+
- {source_dataset_1} # Example: laion/laion-2b
|
| 28 |
+
task_categories: # Full list at https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/pipelines.ts
|
| 29 |
+
- {machine-translation} # Example: question-answering
|
| 30 |
+
- {task_1} # Example: image-classification
|
| 31 |
+
task_ids:
|
| 32 |
+
- {subtask_0} # Example: extractive-qa
|
| 33 |
+
- {subtask_1} # Example: multi-class-image-classification
|
| 34 |
+
paperswithcode_id: {paperswithcode_id} # Dataset id on PapersWithCode (from the URL). Example for SQuAD: squad
|
| 35 |
+
configs: # Optional for datasets with multiple configurations like glue.
|
| 36 |
+
- {config_0} # Example for glue: sst2
|
| 37 |
+
- {config_1} # Example for glue: cola
|
| 38 |
+
|
| 39 |
+
# Optional. This part can be used to store the feature types and size of the dataset to be used in python. This can be automatically generated using the datasets-cli.
|
| 40 |
+
dataset_info:
|
| 41 |
+
features:
|
| 42 |
+
- name: {id} # Example: id
|
| 43 |
+
dtype: {string} # Example: int32
|
| 44 |
+
- name: {en_text} # Example: text
|
| 45 |
+
dtype: {string} # Example: string
|
| 46 |
+
- name: {fr_text} # Example: image
|
| 47 |
+
dtype: {string} # Example: image
|
| 48 |
+
# Example for SQuAD:
|
| 49 |
+
# - name: id
|
| 50 |
+
# dtype: string
|
| 51 |
+
# - name: title
|
| 52 |
+
# dtype: string
|
| 53 |
+
# - name: context
|
| 54 |
+
# dtype: string
|
| 55 |
+
# - name: question
|
| 56 |
+
# dtype: string
|
| 57 |
+
# - name: answers
|
| 58 |
+
# sequence:
|
| 59 |
+
# - name: text
|
| 60 |
+
# dtype: string
|
| 61 |
+
# - name: answer_start
|
| 62 |
+
# dtype: int32
|
| 63 |
+
config_name: {config_name} # Example for glue: sst2
|
| 64 |
+
splits:
|
| 65 |
+
- name: {split_name_0} # Example: train
|
| 66 |
+
num_bytes: {split_num_bytes_0} # Example for SQuAD: 79317110
|
| 67 |
+
num_examples: {split_num_examples_0} # Example for SQuAD: 87599
|
| 68 |
+
download_size: {dataset_download_size} # Example for SQuAD: 35142551
|
| 69 |
+
dataset_size: {dataset_size} # Example for SQuAD: 89789763
|
| 70 |
+
|
| 71 |
+
# It can also be a list of multiple configurations:
|
| 72 |
+
# ```yaml
|
| 73 |
+
# dataset_info:
|
| 74 |
+
# - config_name: {config0}
|
| 75 |
+
# features:
|
| 76 |
+
# ...
|
| 77 |
+
# - config_name: {config1}
|
| 78 |
+
# features:
|
| 79 |
+
# ...
|
| 80 |
+
# ```
|
| 81 |
+
|
| 82 |
+
# Optional. If you want your dataset to be protected behind a gate that users have to accept to access the dataset. More info at https://huggingface.co/docs/hub/datasets-gated
|
| 83 |
+
extra_gated_fields:
|
| 84 |
+
- {field_name_0}: {field_type_0} # Example: Name: text
|
| 85 |
+
- {field_name_1}: {field_type_1} # Example: Affiliation: text
|
| 86 |
+
- {field_name_2}: {field_type_2} # Example: Email: text
|
| 87 |
+
- {field_name_3}: {field_type_3} # Example for speech datasets: I agree to not attempt to determine the identity of speakers in this dataset: checkbox
|
| 88 |
+
extra_gated_prompt: {extra_gated_prompt} # Example for speech datasets: By clicking on “Access repository” below, you also agree to not attempt to determine the identity of speakers in the dataset.
|
| 89 |
+
|
| 90 |
+
# Optional. Add this if you want to encode a train and evaluation info in a structured way for AutoTrain or Evaluation on the Hub
|
| 91 |
+
train-eval-index:
|
| 92 |
+
- config: {config_name} # The dataset config name to use. Example for datasets without configs: default. Example for glue: sst2
|
| 93 |
+
task: {task_name} # The task category name (same as task_category). Example: question-answering
|
| 94 |
+
task_id: {task_type} # The AutoTrain task id. Example: extractive_question_answering
|
| 95 |
+
splits:
|
| 96 |
+
train_split: train # The split to use for training. Example: train
|
| 97 |
+
eval_split: validation # The split to use for evaluation. Example: test
|
| 98 |
+
col_mapping: # The columns mapping needed to configure the task_id.
|
| 99 |
+
# Example for extractive_question_answering:
|
| 100 |
+
# question: question
|
| 101 |
+
# context: context
|
| 102 |
+
# answers:
|
| 103 |
+
# text: text
|
| 104 |
+
# answer_start: answer_start
|
| 105 |
+
metrics:
|
| 106 |
+
- type: {metric_type} # The metric id. Example: wer. Use metric id from https://hf.co/metrics
|
| 107 |
+
name: {metric_name} # The metric name to be displayed. Example: Test WER
|
| 108 |
+
---
|
| 109 |
+
|
| 110 |
+
[//]: # (Valid license identifiers can be found in [our docs](https://huggingface.co/docs/hub/repositories-licenses).)
|
| 111 |
+
|
| 112 |
+
[//]: # (For the full dataset card template, see: [datasetcard_template.md file](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md).)
|