Tasks: Audio Classification (keyword-spotting)
Formats: parquet
Languages: English
Size: 10K - 100K
ArXiv: 1804.03209
License: cc-by-4.0

Commit d80d276
Parent(s): Duplicate from google/speech_commands
Co-authored-by: Parquet-converter (BOT) <parquet-converter@users.noreply.huggingface.co>
- .gitattributes +27 -0
- README.md +411 -0
- speech_commands.py +229 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,411 @@
---
annotations_creators:
- other
language_creators:
- crowdsourced
language:
- en
license:
- cc-by-4.0
multilinguality:
- monolingual
size_categories:
- 100K<n<1M
- 10K<n<100K
source_datasets:
- original
task_categories:
- audio-classification
task_ids:
- keyword-spotting
pretty_name: SpeechCommands
dataset_info:
- config_name: v0.01
  features:
  - name: file
    dtype: string
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: label
    dtype:
      class_label:
        names:
          '0': 'yes'
          '1': 'no'
          '2': up
          '3': down
          '4': left
          '5': right
          '6': 'on'
          '7': 'off'
          '8': stop
          '9': go
          '10': zero
          '11': one
          '12': two
          '13': three
          '14': four
          '15': five
          '16': six
          '17': seven
          '18': eight
          '19': nine
          '20': bed
          '21': bird
          '22': cat
          '23': dog
          '24': happy
          '25': house
          '26': marvin
          '27': sheila
          '28': tree
          '29': wow
          '30': _silence_
  - name: is_unknown
    dtype: bool
  - name: speaker_id
    dtype: string
  - name: utterance_id
    dtype: int8
  splits:
  - name: train
    num_bytes: 1626283624
    num_examples: 51093
  - name: validation
    num_bytes: 217204539
    num_examples: 6799
  - name: test
    num_bytes: 98979965
    num_examples: 3081
  download_size: 1454702755
  dataset_size: 1942468128
- config_name: v0.02
  features:
  - name: file
    dtype: string
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: label
    dtype:
      class_label:
        names:
          '0': 'yes'
          '1': 'no'
          '2': up
          '3': down
          '4': left
          '5': right
          '6': 'on'
          '7': 'off'
          '8': stop
          '9': go
          '10': zero
          '11': one
          '12': two
          '13': three
          '14': four
          '15': five
          '16': six
          '17': seven
          '18': eight
          '19': nine
          '20': bed
          '21': bird
          '22': cat
          '23': dog
          '24': happy
          '25': house
          '26': marvin
          '27': sheila
          '28': tree
          '29': wow
          '30': backward
          '31': forward
          '32': follow
          '33': learn
          '34': visual
          '35': _silence_
  - name: is_unknown
    dtype: bool
  - name: speaker_id
    dtype: string
  - name: utterance_id
    dtype: int8
  splits:
  - name: train
    num_bytes: 2684381672
    num_examples: 84848
  - name: validation
    num_bytes: 316435178
    num_examples: 9982
  - name: test
    num_bytes: 157096106
    num_examples: 4890
  download_size: 2285975869
  dataset_size: 3157912956
config_names:
- v0.01
- v0.02
---

# Dataset Card for SpeechCommands

## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Homepage:** https://www.tensorflow.org/datasets/catalog/speech_commands
- **Repository:** [More Information Needed]
- **Paper:** [Speech Commands: A Dataset for Limited-Vocabulary Speech Recognition](https://arxiv.org/pdf/1804.03209.pdf)
- **Leaderboard:** [More Information Needed]
- **Point of Contact:** Pete Warden, petewarden@google.com

### Dataset Summary

This is a set of one-second .wav audio files, each containing a single spoken
English word or background noise. These words are from a small set of commands, and are spoken by a
variety of different speakers. This data set is designed to help train simple
machine learning models. It is covered in more detail at [https://arxiv.org/abs/1804.03209](https://arxiv.org/abs/1804.03209).

Version 0.01 of the data set (configuration `"v0.01"`) was released on August 3rd 2017 and contains
64,727 audio files.

Version 0.02 of the data set (configuration `"v0.02"`) was released on April 11th 2018 and
contains 105,829 audio files.

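Both configurations can be loaded with the `datasets` library. A minimal loading sketch; the repository id below refers to the original `google/speech_commands` dataset, and this duplicate may live under a different namespace:

```python
# Minimal loading sketch. "google/speech_commands" is the original repository id;
# substitute the id of this duplicate if it differs.
from datasets import load_dataset

speech_commands_v1 = load_dataset("google/speech_commands", "v0.01")
speech_commands_v2 = load_dataset("google/speech_commands", "v0.02")

print(speech_commands_v2)  # DatasetDict with train/validation/test splits
```
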
### Supported Tasks and Leaderboards

* `keyword-spotting`: the dataset can be used to train and evaluate keyword
spotting systems. The task is to detect preregistered keywords by classifying utterances
into a predefined set of words. The task is usually performed on-device for
fast response time. Thus, accuracy, model size, and inference time are all crucial
(see the inference sketch below).

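For illustration, a pretrained audio-classification model can be used to spot these keywords out of the box; the checkpoint name below is an assumed example and is not part of this dataset:

```python
# Hypothetical inference sketch with the transformers audio-classification pipeline.
# The checkpoint is an assumption; any model fine-tuned on Speech Commands works similarly.
from transformers import pipeline

classifier = pipeline("audio-classification", model="MIT/ast-finetuned-speech-commands-v2")
predictions = classifier("no/7846fd85_nohash_0.wav")  # path to a 1-second .wav clip
print(predictions)  # e.g. [{"label": "no", "score": 0.99}, ...]
```
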
### Languages

The language data in SpeechCommands is in English (BCP-47 `en`).

## Dataset Structure

### Data Instances

Example of a core word (`"label"` is a word, `"is_unknown"` is `False`):
```python
{
  "file": "no/7846fd85_nohash_0.wav",
  "audio": {
    "path": "no/7846fd85_nohash_0.wav",
    "array": array([ -0.00021362, -0.00027466, -0.00036621, ...,  0.00079346,
        0.00091553,  0.00079346]),
    "sampling_rate": 16000
  },
  "label": 1,  # "no"
  "is_unknown": False,
  "speaker_id": "7846fd85",
  "utterance_id": 0
}
```

Example of an auxiliary word (`"label"` is a word, `"is_unknown"` is `True`):
```python
{
  "file": "tree/8b775397_nohash_0.wav",
  "audio": {
    "path": "tree/8b775397_nohash_0.wav",
    "array": array([ -0.00854492, -0.01339722, -0.02026367, ...,  0.00274658,
        0.00335693,  0.0005188 ]),
    "sampling_rate": 16000
  },
  "label": 28,  # "tree"
  "is_unknown": True,
  "speaker_id": "1b88bf70",
  "utterance_id": 0
}
```

Example of the background noise (`_silence_`) class:

```python
{
  "file": "_silence_/doing_the_dishes.wav",
  "audio": {
    "path": "_silence_/doing_the_dishes.wav",
    "array": array([ 0.        ,  0.        ,  0.        , ..., -0.00592041,
       -0.00405884, -0.00253296]),
    "sampling_rate": 16000
  },
  "label": 30,  # "_silence_"
  "is_unknown": False,
  "speaker_id": "None",
  "utterance_id": 0  # doesn't make sense here
}
```

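The integer `label` in the instances above can be mapped back to its class name (and vice versa) through the dataset's `ClassLabel` feature, for example:

```python
# Converting between integer labels and class names; repository id assumed as above.
from datasets import load_dataset

ds = load_dataset("google/speech_commands", "v0.01", split="test")
label_feature = ds.features["label"]

print(label_feature.int2str(1))     # "no"
print(label_feature.str2int("no"))  # 1
```
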
### Data Fields

* `file`: relative audio filename inside the original archive.
* `audio`: dictionary containing a relative audio filename,
a decoded audio array, and the sampling rate. Note that when accessing
the audio column: `dataset[0]["audio"]` the audio is automatically decoded
and resampled to `dataset.features["audio"].sampling_rate`.
Decoding and resampling of a large number of audios might take a significant
amount of time. Thus, it is important to first query the sample index before
the `"audio"` column, i.e. `dataset[0]["audio"]` should always be preferred
over `dataset["audio"][0]` (see the access example after this list).
* `label`: either the word pronounced in an audio sample or the background noise (`_silence_`) class.
Note that it is an integer value corresponding to the class name.
* `is_unknown`: whether the word is auxiliary. `False` if the word is a core word or `_silence_`,
`True` if it is an auxiliary word.
* `speaker_id`: unique id of a speaker. Equal to `None` if the label is `_silence_`.
* `utterance_id`: incremental id of a word utterance within the same speaker.

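A short sketch of the recommended row-first access pattern (repository id assumed, as before):

```python
# Row-first access decodes a single clip; column-first access (ds["audio"][0])
# would decode every clip in the split before returning the first one.
from datasets import load_dataset

ds = load_dataset("google/speech_commands", "v0.02", split="validation")

sample = ds[0]["audio"]         # decoded on access
print(sample["sampling_rate"])  # 16000
print(len(sample["array"]))     # ~16000 samples for a 1-second clip
```
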
### Data Splits

The dataset has two versions (= configurations): `"v0.01"` and `"v0.02"`. `"v0.02"`
contains more words (see section [Source Data](#source-data) for more details).

|       | train | validation | test |
|-----  |------:|-----------:|-----:|
| v0.01 | 51093 |       6799 | 3081 |
| v0.02 | 84848 |       9982 | 4890 |

Note that in the train and validation sets, examples of the `_silence_` class are longer than 1 second.
You can use the following code to sample 1-second examples from the longer ones
(since `label` is an integer class id, the `_silence_` clips are detected here by their length):

```python
def sample_noise(example):
    # Use this function to extract random 1 sec slices of each `_silence_` utterance,
    # e.g. inside `torch.utils.data.Dataset.__getitem__()`.
    from random import randint

    audio = example["audio"]
    sampling_rate = audio["sampling_rate"]
    if len(audio["array"]) > sampling_rate:  # only `_silence_` clips exceed 1 second
        random_offset = randint(0, len(audio["array"]) - sampling_rate - 1)
        audio["array"] = audio["array"][random_offset : random_offset + sampling_rate]

    return example
```

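The same function can also be applied eagerly over a loaded split with `datasets.Dataset.map` (a sketch; decoding and re-encoding every clip this way can be slow, so an on-the-fly transform is often preferable):

```python
# `ds` is a loaded split, as in the earlier sketches.
ds = ds.map(sample_noise)
```
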
## Dataset Creation

### Curation Rationale

The primary goal of the dataset is to provide a way to build and test small
models that can detect a single word from a set of target words and differentiate it
from background noise or unrelated speech with as few false positives as possible.

### Source Data

#### Initial Data Collection and Normalization

The audio files were collected using crowdsourcing, see
[aiyprojects.withgoogle.com/open_speech_recording](https://github.com/petewarden/extract_loudest_section)
for some of the open source audio collection code that was used. The goal was to gather examples of
people speaking single-word commands, rather than conversational sentences, so
they were prompted for individual words over the course of a five minute
session.

In version 0.01 thirty different words were recorded: "Yes", "No", "Up", "Down", "Left",
"Right", "On", "Off", "Stop", "Go", "Zero", "One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine",
"Bed", "Bird", "Cat", "Dog", "Happy", "House", "Marvin", "Sheila", "Tree", "Wow".

In version 0.02 more words were added: "Backward", "Forward", "Follow", "Learn", "Visual".

In both versions, ten of them are used as commands by convention: "Yes", "No", "Up", "Down", "Left",
"Right", "On", "Off", "Stop", "Go". Other words are considered to be auxiliary (in the current implementation
this is marked by a `True` value of the `"is_unknown"` feature). Their function is to teach a model to distinguish core words
from unrecognized ones.

The `_silence_` label contains a set of longer audio clips that are either recordings or
a mathematical simulation of noise.

#### Who are the source language producers?

The audio files were collected using crowdsourcing.

### Annotations

#### Annotation process

Labels are drawn from a list of words prepared in advance.
Speakers were prompted for individual words over the course of a five minute
session.

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

The dataset consists of recordings from people who have donated their voices online. You agree to not attempt to determine the identity of speakers in this dataset.

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

Creative Commons BY 4.0 License ([CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/legalcode)).

### Citation Information

```
@article{speechcommandsv2,
  author = { {Warden}, P.},
  title = "{Speech Commands: A Dataset for Limited-Vocabulary Speech Recognition}",
  journal = {ArXiv e-prints},
  archivePrefix = "arXiv",
  eprint = {1804.03209},
  primaryClass = "cs.CL",
  keywords = {Computer Science - Computation and Language, Computer Science - Human-Computer Interaction},
  year = 2018,
  month = apr,
  url = {https://arxiv.org/abs/1804.03209},
}
```

### Contributions

Thanks to [@polinaeterna](https://github.com/polinaeterna) for adding this dataset.
speech_commands.py ADDED
@@ -0,0 +1,229 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Speech Commands, an audio dataset of spoken words designed to help train and evaluate keyword spotting systems."""


import textwrap

import datasets


_CITATION = """
@article{speechcommandsv2,
    author = { {Warden}, P.},
    title = "{Speech Commands: A Dataset for Limited-Vocabulary Speech Recognition}",
    journal = {ArXiv e-prints},
    archivePrefix = "arXiv",
    eprint = {1804.03209},
    primaryClass = "cs.CL",
    keywords = {Computer Science - Computation and Language, Computer Science - Human-Computer Interaction},
    year = 2018,
    month = apr,
    url = {https://arxiv.org/abs/1804.03209},
}
"""

_DESCRIPTION = """
This is a set of one-second .wav audio files, each containing a single spoken
English word or background noise. These words are from a small set of commands, and are spoken by a
variety of different speakers. This data set is designed to help train simple
machine learning models. This dataset is covered in more detail at
[https://arxiv.org/abs/1804.03209](https://arxiv.org/abs/1804.03209).

Version 0.01 of the data set (configuration `"v0.01"`) was released on August 3rd 2017 and contains
64,727 audio files.

In version 0.01 thirty different words were recorded: "Yes", "No", "Up", "Down", "Left",
"Right", "On", "Off", "Stop", "Go", "Zero", "One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine",
"Bed", "Bird", "Cat", "Dog", "Happy", "House", "Marvin", "Sheila", "Tree", "Wow".


In version 0.02 more words were added: "Backward", "Forward", "Follow", "Learn", "Visual".

In both versions, ten of them are used as commands by convention: "Yes", "No", "Up", "Down", "Left",
"Right", "On", "Off", "Stop", "Go". Other words are considered to be auxiliary (in current implementation
it is marked by `True` value of `"is_unknown"` feature). Their function is to teach a model to distinguish core words
from unrecognized ones.

The `_silence_` class contains a set of longer audio clips that are either recordings or
a mathematical simulation of noise.

"""

_LICENSE = "Creative Commons BY 4.0 License"

_URL = "https://www.tensorflow.org/datasets/catalog/speech_commands"

_DL_URL = "https://s3.amazonaws.com/datasets.huggingface.co/SpeechCommands/{name}/{name}_{split}.tar.gz"

WORDS = [
    "yes",
    "no",
    "up",
    "down",
    "left",
    "right",
    "on",
    "off",
    "stop",
    "go",
]

UNKNOWN_WORDS_V1 = [
    "zero",
    "one",
    "two",
    "three",
    "four",
    "five",
    "six",
    "seven",
    "eight",
    "nine",
    "bed",
    "bird",
    "cat",
    "dog",
    "happy",
    "house",
    "marvin",
    "sheila",
    "tree",
    "wow",
]

UNKNOWN_WORDS_V2 = UNKNOWN_WORDS_V1 + [
    "backward",
    "forward",
    "follow",
    "learn",
    "visual",
]

SILENCE = "_silence_"  # background noise
LABELS_V1 = WORDS + UNKNOWN_WORDS_V1 + [SILENCE]
LABELS_V2 = WORDS + UNKNOWN_WORDS_V2 + [SILENCE]


class SpeechCommandsConfig(datasets.BuilderConfig):
    """BuilderConfig for SpeechCommands."""

    def __init__(self, labels, **kwargs):
        super(SpeechCommandsConfig, self).__init__(**kwargs)
        self.labels = labels


class SpeechCommands(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        SpeechCommandsConfig(
            name="v0.01",
            description=textwrap.dedent(
                """\
                Version 0.01 of the SpeechCommands dataset. Contains 30 words
                (20 of them are auxiliary) and background noise.
                """
            ),
            labels=LABELS_V1,
            version=datasets.Version("0.1.0"),
        ),
        SpeechCommandsConfig(
            name="v0.02",
            description=textwrap.dedent(
                """\
                Version 0.02 of the SpeechCommands dataset.
                Contains 35 words (25 of them are auxiliary) and background noise.
                """
            ),
            labels=LABELS_V2,
            version=datasets.Version("0.2.0"),
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=16_000),
                    "label": datasets.ClassLabel(names=self.config.labels),
                    "is_unknown": datasets.Value("bool"),
                    "speaker_id": datasets.Value("string"),
                    "utterance_id": datasets.Value("int8"),
                }
            ),
            homepage=_URL,
            citation=_CITATION,
            license=_LICENSE,
            version=self.config.version,
        )

    def _split_generators(self, dl_manager):

        archive_paths = dl_manager.download(
            {
                "train": _DL_URL.format(name=self.config.name, split="train"),
                "validation": _DL_URL.format(name=self.config.name, split="validation"),
                "test": _DL_URL.format(name=self.config.name, split="test"),
            }
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "archive": dl_manager.iter_archive(archive_paths["train"]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "archive": dl_manager.iter_archive(archive_paths["validation"]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "archive": dl_manager.iter_archive(archive_paths["test"]),
                },
            ),
        ]

    def _generate_examples(self, archive):
        for path, file in archive:
            if not path.endswith(".wav"):
                continue

            word, audio_filename = path.split("/")
            is_unknown = False

            if word == SILENCE:
                speaker_id, utterance_id = None, 0

            else:  # word is either in WORDS or unknown
                if word not in WORDS:
                    is_unknown = True
                # an audio filename looks like `0bac8a71_nohash_0.wav`
                speaker_id, _, utterance_id = audio_filename.split(".wav")[0].split("_")

            yield path, {
                "file": path,
                "audio": {"path": path, "bytes": file.read()},
                "label": word,
                "is_unknown": is_unknown,
                "speaker_id": speaker_id,
                "utterance_id": utterance_id,
            }