File size: 2,458 Bytes
8ac0aac
 
6707a5a
 
 
 
ed4153e
 
 
 
77daad1
 
 
8ac0aac
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
---
license: apache-2.0
task_categories:
- text2text-generation
language:
- en
tags:
- code
size_categories:
- 1K<n<10K
libraries:
- croissant
- datasets
---
language:
- {lang_0}  # Example: fr
- {lang_1}  # Example: en
license: {license}  # Example: apache-2.0 or any license from https://hf.co/docs/hub/repositories-licenses
license_name: {license_name}  # If license = other (license not in https://hf.co/docs/hub/repositories-licenses), specify an id for it here, like `my-license-1.0`.
license_link: {license_link}  # If license = other, specify "LICENSE" or "LICENSE.md" to link to a file of that name inside the repo, or a URL to a remote file.
license_details: {license_details}  # Legacy, textual description of a custom license.
tags:
- {tag_0}  # Example: audio
- {tag_1}  # Example: bio
- {tag_2}  # Example: natural-language-understanding
- {tag_3}  # Example: birds-classification
annotations_creators:
- {creator}  # Example: crowdsourced, found, expert-generated, machine-generated
language_creators:
- {creator}  # Example: crowdsourced, ...
language_details:
- {bcp47_lang_0}  # Example: fr-FR
- {bcp47_lang_1}  # Example: en-US
pretty_name: {pretty_name}  # Example: SQuAD
size_categories:
- {number_of_elements_in_dataset}  # Example: n<1K, 100K<n<1M, …
source_datasets:
- {source_dataset_0}  # Example: wikipedia
- {source_dataset_1}  # Example: laion/laion-2b
task_categories:  # Full list at https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/pipelines.ts
- {task_0}  # Example: question-answering
- {task_1}  # Example: image-classification
task_ids:
- {subtask_0}  # Example: extractive-qa
- {subtask_1}  # Example: multi-class-image-classification
paperswithcode_id: {paperswithcode_id}  # Dataset id on PapersWithCode (from the URL). Example for SQuAD: squad
configs:  # Optional. This can be used to pass additional parameters to the dataset loader, such as `data_files`, `data_dir`, and any builder-specific parameters
- config_name: {config_name_0}  # Name of the dataset subset, if applicable. Example: default
  data_files:
  - split: {split_name_0}  # Example: train
    path: {file_path_0}  # Example: data.csv
  - split: {split_name_1}  # Example: test
    path: {file_path_1}  # Example: holdout.csv
- config_name: {config_name_1}  # Name of the dataset subset. Example: processed
  data_files:
  - split: {split_name_2}  # Example: train
    path: {file_path_2}  # Example: data_processed.csv