Convert dataset to Parquet

#11
opened by lhoestq
README.md CHANGED
@@ -49,82 +49,58 @@ dataset_info:
49
  dtype: string
50
  splits:
51
  - name: train
52
- num_bytes: 11852430
53
  num_examples: 28539
54
  - name: validation
55
- num_bytes: 897213
56
  num_examples: 2703
57
  - name: test
58
- num_bytes: 871234
59
  num_examples: 2620
60
- download_size: 7071899769
61
- dataset_size: 13620877
62
- - config_name: sd
63
- features:
64
- - name: record_id
65
- dtype: string
66
- - name: file
67
- dtype: string
68
- - name: start
69
- dtype: int64
70
- - name: end
71
- dtype: int64
72
- - name: speakers
73
- list:
74
- - name: speaker_id
75
- dtype: string
76
- - name: start
77
- dtype: int64
78
- - name: end
79
- dtype: int64
80
- splits:
81
- - name: train
82
- num_bytes: 4622013
83
- num_examples: 13901
84
- - name: dev
85
- num_bytes: 860472
86
- num_examples: 3014
87
- - name: test
88
- num_bytes: 847803
89
- num_examples: 3002
90
- download_size: 7190370211
91
- dataset_size: 6330288
92
- - config_name: ks
93
  features:
94
  - name: file
95
  dtype: string
96
  - name: label
97
  dtype:
98
  class_label:
99
  names:
100
- '0': 'yes'
101
- '1': 'no'
102
- '2': up
103
- '3': down
104
- '4': left
105
- '5': right
106
- '6': 'on'
107
- '7': 'off'
108
- '8': stop
109
- '9': go
110
- '10': _silence_
111
- '11': _unknown_
112
  splits:
113
- - name: train
114
- num_bytes: 8467781
115
- num_examples: 51094
116
- - name: validation
117
- num_bytes: 1126476
118
- num_examples: 6798
119
- - name: test
120
- num_bytes: 510619
121
- num_examples: 3081
122
- download_size: 1560367713
123
- dataset_size: 10104876
124
  - config_name: ic
125
  features:
126
  - name: file
127
  dtype: string
128
  - name: speaker_id
129
  dtype: string
130
  - name: text
@@ -167,20 +143,94 @@ dataset_info:
167
  '3': washroom
168
  splits:
169
  - name: train
170
- num_bytes: 7071466
171
  num_examples: 23132
172
  - name: validation
173
- num_bytes: 953622
174
  num_examples: 3118
175
  - name: test
176
- num_bytes: 1158347
177
  num_examples: 3793
178
- download_size: 1544093324
179
- dataset_size: 9183435
180
  - config_name: si
181
  features:
182
  - name: file
183
  dtype: string
184
  - name: label
185
  dtype:
186
  class_label:
@@ -1438,16 +1488,69 @@ dataset_info:
1438
  '1250': id11251
1439
  splits:
1440
  - name: train
1441
- num_bytes: 12729268
1442
  num_examples: 138361
1443
  - name: validation
1444
- num_bytes: 635172
1445
  num_examples: 6904
1446
  - name: test
1447
- num_bytes: 759096
1448
  num_examples: 8251
1449
- download_size: 0
1450
- dataset_size: 14123536
1451
  ---
1452
 
1453
  # Dataset Card for SUPERB
 
49
  dtype: string
50
  splits:
51
  - name: train
52
+ num_bytes: 6375977437.341
53
  num_examples: 28539
54
  - name: validation
55
+ num_bytes: 348570484.902
56
  num_examples: 2703
57
  - name: test
58
+ num_bytes: 372116604.94
59
  num_examples: 2620
60
+ download_size: 7125182283
61
+ dataset_size: 7096664527.183
62
+ - config_name: er
63
  features:
64
  - name: file
65
  dtype: string
66
+ - name: audio
67
+ dtype:
68
+ audio:
69
+ sampling_rate: 16000
70
  - name: label
71
  dtype:
72
  class_label:
73
  names:
74
+ '0': neu
75
+ '1': hap
76
+ '2': ang
77
+ '3': sad
78
  splits:
79
+ - name: session1
80
+ num_bytes: 165335280.64
81
+ num_examples: 1085
82
+ - name: session2
83
+ num_bytes: 154368877.13
84
+ num_examples: 1023
85
+ - name: session3
86
+ num_bytes: 158481419.59
87
+ num_examples: 1151
88
+ - name: session4
89
+ num_bytes: 147948458.55
90
+ num_examples: 1031
91
+ - name: session5
92
+ num_bytes: 170303066.098
93
+ num_examples: 1241
94
+ download_size: 787372540
95
+ dataset_size: 796437102.0080001
96
  - config_name: ic
97
  features:
98
  - name: file
99
  dtype: string
100
+ - name: audio
101
+ dtype:
102
+ audio:
103
+ sampling_rate: 16000
104
  - name: speaker_id
105
  dtype: string
106
  - name: text
 
143
  '3': washroom
144
  splits:
145
  - name: train
146
+ num_bytes: 1874399385.936
147
  num_examples: 23132
148
  - name: validation
149
+ num_bytes: 239956791.064
150
  num_examples: 3118
151
  - name: test
152
+ num_bytes: 315754572.65
153
  num_examples: 3793
154
+ download_size: 1916993254
155
+ dataset_size: 2430110749.65
156
+ - config_name: ks
157
+ features:
158
+ - name: file
159
+ dtype: string
160
+ - name: audio
161
+ dtype:
162
+ audio:
163
+ sampling_rate: 16000
164
+ - name: label
165
+ dtype:
166
+ class_label:
167
+ names:
168
+ '0': 'yes'
169
+ '1': 'no'
170
+ '2': up
171
+ '3': down
172
+ '4': left
173
+ '5': right
174
+ '6': 'on'
175
+ '7': 'off'
176
+ '8': stop
177
+ '9': go
178
+ '10': _silence_
179
+ '11': _unknown_
180
+ splits:
181
+ - name: train
182
+ num_bytes: 2260079579.356
183
+ num_examples: 51094
184
+ - name: validation
185
+ num_bytes: 215210144.552
186
+ num_examples: 6798
187
+ - name: test
188
+ num_bytes: 99219235.676
189
+ num_examples: 3081
190
+ download_size: 1770827480
191
+ dataset_size: 2574508959.584
192
+ - config_name: sd
193
+ features:
194
+ - name: record_id
195
+ dtype: string
196
+ - name: file
197
+ dtype: string
198
+ - name: audio
199
+ dtype:
200
+ audio:
201
+ sampling_rate: 16000
202
+ - name: start
203
+ dtype: int64
204
+ - name: end
205
+ dtype: int64
206
+ - name: speakers
207
+ list:
208
+ - name: speaker_id
209
+ dtype: string
210
+ - name: start
211
+ dtype: int64
212
+ - name: end
213
+ dtype: int64
214
+ splits:
215
+ - name: train
216
+ num_bytes: 6441566080.729
217
+ num_examples: 13901
218
+ - name: dev
219
+ num_bytes: 886182068.496
220
+ num_examples: 3014
221
+ - name: test
222
+ num_bytes: 806042390.538
223
+ num_examples: 3002
224
+ download_size: 8179921384
225
+ dataset_size: 8133790539.7630005
226
  - config_name: si
227
  features:
228
  - name: file
229
  dtype: string
230
+ - name: audio
231
+ dtype:
232
+ audio:
233
+ sampling_rate: 16000
234
  - name: label
235
  dtype:
236
  class_label:
 
1488
  '1250': id11251
1489
  splits:
1490
  - name: train
1491
+ num_bytes: 35010298085.36
1492
  num_examples: 138361
1493
  - name: validation
1494
+ num_bytes: 1707484335.0
1495
  num_examples: 6904
1496
  - name: test
1497
+ num_bytes: 2078843122.32
1498
  num_examples: 8251
1499
+ download_size: 40376064647
1500
+ dataset_size: 38796625542.68
1501
+ configs:
1502
+ - config_name: asr
1503
+ data_files:
1504
+ - split: train
1505
+ path: asr/train-*
1506
+ - split: validation
1507
+ path: asr/validation-*
1508
+ - split: test
1509
+ path: asr/test-*
1510
+ - config_name: er
1511
+ data_files:
1512
+ - split: session1
1513
+ path: er/session1-*
1514
+ - split: session2
1515
+ path: er/session2-*
1516
+ - split: session3
1517
+ path: er/session3-*
1518
+ - split: session4
1519
+ path: er/session4-*
1520
+ - split: session5
1521
+ path: er/session5-*
1522
+ - config_name: ic
1523
+ data_files:
1524
+ - split: train
1525
+ path: ic/train-*
1526
+ - split: validation
1527
+ path: ic/validation-*
1528
+ - split: test
1529
+ path: ic/test-*
1530
+ - config_name: ks
1531
+ data_files:
1532
+ - split: train
1533
+ path: ks/train-*
1534
+ - split: validation
1535
+ path: ks/validation-*
1536
+ - split: test
1537
+ path: ks/test-*
1538
+ - config_name: sd
1539
+ data_files:
1540
+ - split: train
1541
+ path: sd/train-*
1542
+ - split: dev
1543
+ path: sd/dev-*
1544
+ - split: test
1545
+ path: sd/test-*
1546
+ - config_name: si
1547
+ data_files:
1548
+ - split: train
1549
+ path: si/train-*
1550
+ - split: validation
1551
+ path: si/validation-*
1552
+ - split: test
1553
+ path: si/test-*
1554
  ---
1555
 
1556
  # Dataset Card for SUPERB
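
With the `configs` section above mapping each config name to its Parquet shards, the data can be loaded directly through the `datasets` library, with no loading script involved. A minimal sketch, assuming the canonical `superb` dataset repo id (adjust to the actual repo path):

```python
from datasets import load_dataset

# Stream the keyword-spotting config from the Parquet shards declared in the
# README's `configs` section; streaming avoids downloading every shard up front.
ks = load_dataset("superb", "ks", split="train", streaming=True)

for example in ks.take(2):
    # `audio` is declared with sampling_rate 16000 in the dataset_info above.
    print(example["label"], example["audio"]["sampling_rate"])
```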
asr/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:edf6eb5029184547e9c2b7ee07844d98b1bb333d95909146edce9afaefa362c1
3
+ size 350367696
asr/train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a921898a2c41711c79dea0cf86c2a9feddad594d6b469571a0a5ee4485403f80
3
+ size 3231467504
asr/train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:96bb09cc4768cf85806d99f69569a681df9e41926b6e5ea98366400ebec2d34f
3
+ size 3201610095
asr/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:766714bb5eb3870e45e07a59b27374bbfe51c9cd79800f002ab2604a8a97ca63
3
+ size 341736988
er/session1-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ded195a870815e8d94ff48f19b080eba8cb83b4d836c58aea7cbd51dad01afda
3
+ size 164127784
er/session2-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4fa215715513a659c551f1b9204753ee4ef6d143f4826e6567d72fdef712ecdb
3
+ size 151423541
er/session3-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a814da93d92ca2e98a28e955f621cf3eec76104c04bc96e5ac37031c5a31c69
3
+ size 154930651
er/session4-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90b933081572e93f0622e85800e167644485d2927f43dd6f42b484a44ba50752
3
+ size 142714787
er/session5-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ea853f3ad6f06fb3dcb7a9f1eca55ae26b61130ab5e5f281c7dedd25b5ef1c8
3
+ size 174175777
ic/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4b8e5cdf16b5f79a7230e0a89867ea7fd671667c2fe53340935dc9662731d24
3
+ size 254716542
ic/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5347b37e4e11220374b94a63abca24995b08b589a03bb7d87840200ef0cf6312
3
+ size 1461872632
ic/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b9683ebf0d030540af58a464e0e65a4873ebd23cfd00a4593b37a2e87efe657e
3
+ size 200404080
ks/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a4e04cbee1b6f4e65a58979d86bc97864b08979005f861c239044dd3f17a0ef
3
+ size 89509029
ks/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5388b3df4235bfd0088607e846b1de29cbd242ffbe6a64a5b5b118572798e81e
3
+ size 1486915614
ks/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:575aa9375dfce076a40c797ed9e868185c20e881956cded7c5fb76e35346097e
3
+ size 194402837
sd/dev-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:97654d69b9d1a5333090f8ebd9450d81bdc4273c31dbc9099ec654b8a638c05e
3
+ size 875908406
sd/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a269b409b20bd58a1b3af99ea2e6b1161e57af2fc4dd0c9de94c00cadbbfd768
3
+ size 808212224
sd/train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2fd60f2f7a080c7a809de1ba7677fb22ff83a94b1ff6dc2ef8693e48d613b161
3
+ size 3241549444
sd/train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09944f7008a87f909d56678dc771c14614b4b9f8419ca5a3e4e0e411366c889a
3
+ size 3254251310
si/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cb33c145765765604933cfbab5001ca2065b31d4bdce40ea765dd3851a66a0a7
3
+ size 2070604755
si/train-00000-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e44bda7f41c58cd5e41aad075131af66a78eaa761f8332913a8759fbe9d667e
3
+ size 3892550746
si/train-00001-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b960ef06b038fb25c7a053db266f8dda342063e9333ba7a86cf48ee831e64058
3
+ size 3992391334
si/train-00002-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:58387f08b30e63a9909eb4c2a45c7a7a892eaf5a780ea57799cc7766aac319bc
3
+ size 4071358640
si/train-00003-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a37c1895c87e3eeaa5763b16dcd46aaeba37ad0e2468101d5e5eec732fd85c6
3
+ size 3960240394
si/train-00004-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b2397f55b2a4023321f005fff7e7222490e52f69f61307085b1536a5be42a8b
3
+ size 4168651375
si/train-00005-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:979f12ac4cc094fabe1f6f93e51af54d4202a00a5f596dc1487ca3f9841c4d3c
3
+ size 4130986890
si/train-00006-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea1004fb99123640ceff72c3d5964a65086311786b067dc793e0fabfa45ea670
3
+ size 4045167192
si/train-00007-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a2ecee93df072019345072cf4817b23cdd5ec6f33fd2b61feddbdbf283531e7
3
+ size 4183253949
si/train-00008-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fffae70f2b190ced86d0f6f00053c54aee2d898d71ad7acff9c37ada732c0197
3
+ size 4114588516
si/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a709a814d649486708cacbefc3016bfbb0f01454ea9594e159810de633b679a
3
+ size 1746270856
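
The blocks above are Git LFS pointer files; the Parquet shards they reference can also be read on their own, outside of `datasets`. A rough sketch using `huggingface_hub` and `pyarrow` (the repo id is an assumption, substitute the actual dataset repository):

```python
import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

# Download a single shard added by this PR (repo_id is assumed; use the real dataset repo).
path = hf_hub_download(
    repo_id="superb",
    repo_type="dataset",
    filename="ks/validation-00000-of-00001.parquet",
)

table = pq.read_table(path)
print(table.schema)    # file, audio, label columns per the ks dataset_info above
print(table.num_rows)  # 6798 validation examples according to the README metadata
```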
superb.py DELETED
@@ -1,686 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """SUPERB: Speech processing Universal PERformance Benchmark."""
18
-
19
- import csv
20
- import glob
21
- import os
22
- import textwrap
23
- from dataclasses import dataclass
24
-
25
- import datasets
26
-
27
-
28
- _CITATION = """\
29
- @article{DBLP:journals/corr/abs-2105-01051,
30
- author = {Shu{-}Wen Yang and
31
- Po{-}Han Chi and
32
- Yung{-}Sung Chuang and
33
- Cheng{-}I Jeff Lai and
34
- Kushal Lakhotia and
35
- Yist Y. Lin and
36
- Andy T. Liu and
37
- Jiatong Shi and
38
- Xuankai Chang and
39
- Guan{-}Ting Lin and
40
- Tzu{-}Hsien Huang and
41
- Wei{-}Cheng Tseng and
42
- Ko{-}tik Lee and
43
- Da{-}Rong Liu and
44
- Zili Huang and
45
- Shuyan Dong and
46
- Shang{-}Wen Li and
47
- Shinji Watanabe and
48
- Abdelrahman Mohamed and
49
- Hung{-}yi Lee},
50
- title = {{SUPERB:} Speech processing Universal PERformance Benchmark},
51
- journal = {CoRR},
52
- volume = {abs/2105.01051},
53
- year = {2021},
54
- url = {https://arxiv.org/abs/2105.01051},
55
- archivePrefix = {arXiv},
56
- eprint = {2105.01051},
57
- timestamp = {Thu, 01 Jul 2021 13:30:22 +0200},
58
- biburl = {https://dblp.org/rec/journals/corr/abs-2105-01051.bib},
59
- bibsource = {dblp computer science bibliography, https://dblp.org}
60
- }
61
- """
62
-
63
- _DESCRIPTION = """\
64
- Self-supervised learning (SSL) has proven vital for advancing research in
65
- natural language processing (NLP) and computer vision (CV). The paradigm
66
- pretrains a shared model on large volumes of unlabeled data and achieves
67
- state-of-the-art (SOTA) for various tasks with minimal adaptation. However, the
68
- speech processing community lacks a similar setup to systematically explore the
69
- paradigm. To bridge this gap, we introduce Speech processing Universal
70
- PERformance Benchmark (SUPERB). SUPERB is a leaderboard to benchmark the
71
- performance of a shared model across a wide range of speech processing tasks
72
- with minimal architecture changes and labeled data. Among multiple usages of the
73
- shared model, we especially focus on extracting the representation learned from
74
- SSL due to its preferable re-usability. We present a simple framework to solve
75
- SUPERB tasks by learning task-specialized lightweight prediction heads on top of
76
- the frozen shared model. Our results demonstrate that the framework is promising
77
- as SSL representations show competitive generalizability and accessibility
78
- across SUPERB tasks. We release SUPERB as a challenge with a leaderboard and a
79
- benchmark toolkit to fuel the research in representation learning and general
80
- speech processing.
81
-
82
- Note that in order to limit the required storage for preparing this dataset, the
83
- audio is stored in the .wav format and is not converted to a float32 array. To
84
- convert the audio file to a float32 array, please make use of the `.map()`
85
- function as follows:
86
-
87
-
88
- ```python
89
- import soundfile as sf
90
-
91
- def map_to_array(batch):
92
- speech_array, _ = sf.read(batch["file"])
93
- batch["speech"] = speech_array
94
- return batch
95
-
96
- dataset = dataset.map(map_to_array, remove_columns=["file"])
97
- ```
98
- """
99
-
100
-
101
- class SuperbConfig(datasets.BuilderConfig):
102
- """BuilderConfig for Superb."""
103
-
104
- def __init__(
105
- self,
106
- features,
107
- url,
108
- data_url=None,
109
- supervised_keys=None,
110
- **kwargs,
111
- ):
112
- super().__init__(version=datasets.Version("1.9.0", ""), **kwargs)
113
- self.features = features
114
- self.data_url = data_url
115
- self.url = url
116
- self.supervised_keys = supervised_keys
117
-
118
-
119
- class Superb(datasets.GeneratorBasedBuilder):
120
- """Superb dataset."""
121
-
122
- BUILDER_CONFIGS = [
123
- SuperbConfig(
124
- name="asr",
125
- description=textwrap.dedent(
126
- """\
127
- ASR transcribes utterances into words. While PR analyzes the
128
- improvement in modeling phonetics, ASR reflects the significance of
129
- the improvement in a real-world scenario. LibriSpeech
130
- train-clean-100/dev-clean/test-clean subsets are used for
131
- training/validation/testing. The evaluation metric is word error
132
- rate (WER)."""
133
- ),
134
- features=datasets.Features(
135
- {
136
- "file": datasets.Value("string"),
137
- "audio": datasets.Audio(sampling_rate=16_000),
138
- "text": datasets.Value("string"),
139
- "speaker_id": datasets.Value("int64"),
140
- "chapter_id": datasets.Value("int64"),
141
- "id": datasets.Value("string"),
142
- }
143
- ),
144
- supervised_keys=("file", "text"),
145
- url="http://www.openslr.org/12",
146
- data_url="http://www.openslr.org/resources/12/",
147
- ),
148
- SuperbConfig(
149
- name="ks",
150
- description=textwrap.dedent(
151
- """\
152
- Keyword Spotting (KS) detects preregistered keywords by classifying utterances into a predefined set of
153
- words. The task is usually performed on-device for the fast response time. Thus, accuracy, model size, and
154
- inference time are all crucial. SUPERB uses the widely used Speech Commands dataset v1.0 for the task.
155
- The dataset consists of ten classes of keywords, a class for silence, and an unknown class to include the
156
- false positive. The evaluation metric is accuracy (ACC)"""
157
- ),
158
- features=datasets.Features(
159
- {
160
- "file": datasets.Value("string"),
161
- "audio": datasets.Audio(sampling_rate=16_000),
162
- "label": datasets.ClassLabel(
163
- names=[
164
- "yes",
165
- "no",
166
- "up",
167
- "down",
168
- "left",
169
- "right",
170
- "on",
171
- "off",
172
- "stop",
173
- "go",
174
- "_silence_",
175
- "_unknown_",
176
- ]
177
- ),
178
- }
179
- ),
180
- supervised_keys=("file", "label"),
181
- url="https://www.tensorflow.org/datasets/catalog/speech_commands",
182
- data_url="http://download.tensorflow.org/data/{filename}",
183
- ),
184
- SuperbConfig(
185
- name="ic",
186
- description=textwrap.dedent(
187
- """\
188
- Intent Classification (IC) classifies utterances into predefined classes to determine the intent of
189
- speakers. SUPERB uses the Fluent Speech Commands dataset, where each utterance is tagged with three intent
190
- labels: action, object, and location. The evaluation metric is accuracy (ACC)."""
191
- ),
192
- features=datasets.Features(
193
- {
194
- "file": datasets.Value("string"),
195
- "audio": datasets.Audio(sampling_rate=16_000),
196
- "speaker_id": datasets.Value("string"),
197
- "text": datasets.Value("string"),
198
- "action": datasets.ClassLabel(
199
- names=["activate", "bring", "change language", "deactivate", "decrease", "increase"]
200
- ),
201
- "object": datasets.ClassLabel(
202
- names=[
203
- "Chinese",
204
- "English",
205
- "German",
206
- "Korean",
207
- "heat",
208
- "juice",
209
- "lamp",
210
- "lights",
211
- "music",
212
- "newspaper",
213
- "none",
214
- "shoes",
215
- "socks",
216
- "volume",
217
- ]
218
- ),
219
- "location": datasets.ClassLabel(names=["bedroom", "kitchen", "none", "washroom"]),
220
- }
221
- ),
222
- supervised_keys=None,
223
- url="https://fluent.ai/fluent-speech-commands-a-dataset-for-spoken-language-understanding-research/",
224
- data_url="http://fluent.ai:2052/jf8398hf30f0381738rucj3828chfdnchs.tar.gz",
225
- ),
226
- SuperbConfig(
227
- name="si",
228
- description=textwrap.dedent(
229
- """\
230
- Speaker Identification (SI) classifies each utterance for its speaker identity as a multi-class
231
- classification, where speakers are in the same predefined set for both training and testing. The widely
232
- used VoxCeleb1 dataset is adopted, and the evaluation metric is accuracy (ACC)."""
233
- ),
234
- features=datasets.Features(
235
- {
236
- "file": datasets.Value("string"),
237
- "audio": datasets.Audio(sampling_rate=16_000),
238
- # VoxCeleb1 contains 1251 speaker IDs in range ["id10001",..."id11251"]
239
- "label": datasets.ClassLabel(names=[f"id{i + 10001}" for i in range(1251)]),
240
- }
241
- ),
242
- supervised_keys=("file", "label"),
243
- url="https://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1.html",
244
- ),
245
- SuperbConfig(
246
- name="sd",
247
- description=textwrap.dedent(
248
- """\
249
- Speaker Diarization (SD) predicts `who is speaking when` for each timestamp, and multiple speakers can
250
- speak simultaneously. The model has to encode rich speaker characteristics for each frame and should be
251
- able to represent mixtures of signals. [LibriMix] is adopted where LibriSpeech
252
- train-clean-100/dev-clean/test-clean are used to generate mixtures for training/validation/testing.
253
- We focus on the two-speaker scenario as the first step. The time-coded speaker labels were generated using
254
- alignments from Kaldi LibriSpeech ASR model. The evaluation metric is diarization error rate (DER)."""
255
- ),
256
- features=datasets.Features(
257
- {
258
- "record_id": datasets.Value("string"),
259
- "file": datasets.Value("string"),
260
- "audio": datasets.Audio(sampling_rate=16_000),
261
- "start": datasets.Value("int64"),
262
- "end": datasets.Value("int64"),
263
- "speakers": [
264
- {
265
- "speaker_id": datasets.Value("string"),
266
- "start": datasets.Value("int64"),
267
- "end": datasets.Value("int64"),
268
- }
269
- ],
270
- }
271
- ), # TODO
272
- supervised_keys=None, # TODO
273
- url="https://github.com/ftshijt/LibriMix",
274
- data_url="https://huggingface.co/datasets/superb/superb-data/resolve/main/sd/{split}/{filename}",
275
- ),
276
- SuperbConfig(
277
- name="er",
278
- description=textwrap.dedent(
279
- """\
280
- Emotion Recognition (ER) predicts an emotion class for each utterance. The most widely used ER dataset
281
- IEMOCAP is adopted, and we follow the conventional evaluation protocol: we drop the unbalanced emotion
282
- classes to leave the final four classes with a similar amount of data points and cross-validate on five
283
- folds of the standard splits. The evaluation metric is accuracy (ACC)."""
284
- ),
285
- features=datasets.Features(
286
- {
287
- "file": datasets.Value("string"),
288
- "audio": datasets.Audio(sampling_rate=16_000),
289
- "label": datasets.ClassLabel(names=["neu", "hap", "ang", "sad"]),
290
- }
291
- ),
292
- supervised_keys=("file", "label"),
293
- url="https://sail.usc.edu/iemocap/",
294
- ),
295
- ]
296
-
297
- @property
298
- def manual_download_instructions(self):
299
- if self.config.name == "si":
300
- return textwrap.dedent(
301
- """
302
- Please download the VoxCeleb dataset using the following script,
303
- which should create `VoxCeleb1/wav/id*` directories for both train and test speakers`:
304
- ```
305
- mkdir VoxCeleb1
306
- cd VoxCeleb1
307
-
308
- wget https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_partaa
309
- wget https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_partab
310
- wget https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_partac
311
- wget https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_partad
312
- cat vox1_dev* > vox1_dev_wav.zip
313
- unzip vox1_dev_wav.zip
314
-
315
- wget https://thor.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_test_wav.zip
316
- unzip vox1_test_wav.zip
317
-
318
- # download the official SUPERB train-dev-test split
319
- wget https://raw.githubusercontent.com/s3prl/s3prl/master/s3prl/downstream/voxceleb1/veri_test_class.txt
320
- ```"""
321
- )
322
- elif self.config.name == "er":
323
- return textwrap.dedent(
324
- """
325
- Please download the IEMOCAP dataset after submitting the request form here:
326
- https://sail.usc.edu/iemocap/iemocap_release.htm
327
- Having downloaded the dataset you can extract it with `tar -xvzf IEMOCAP_full_release.tar.gz`
328
- which should create a folder called `IEMOCAP_full_release`
329
- """
330
- )
331
- return None
332
-
333
- def _info(self):
334
- return datasets.DatasetInfo(
335
- description=_DESCRIPTION,
336
- features=self.config.features,
337
- supervised_keys=self.config.supervised_keys,
338
- homepage=self.config.url,
339
- citation=_CITATION,
340
- )
341
-
342
- def _split_generators(self, dl_manager):
343
- if self.config.name == "asr":
344
- _DL_URLS = {
345
- "dev": self.config.data_url + "dev-clean.tar.gz",
346
- "test": self.config.data_url + "test-clean.tar.gz",
347
- "train": self.config.data_url + "train-clean-100.tar.gz",
348
- }
349
- archive_path = dl_manager.download_and_extract(_DL_URLS)
350
-
351
- return [
352
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"archive_path": archive_path["train"]}),
353
- datasets.SplitGenerator(
354
- name=datasets.Split.VALIDATION, gen_kwargs={"archive_path": archive_path["dev"]}
355
- ),
356
- datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path["test"]}),
357
- ]
358
- elif self.config.name == "ks":
359
- _DL_URLS = {
360
- "train_val_test": self.config.data_url.format(filename="speech_commands_v0.01.tar.gz"),
361
- "test": self.config.data_url.format(filename="speech_commands_test_set_v0.01.tar.gz"),
362
- }
363
- archive_path = dl_manager.download_and_extract(_DL_URLS)
364
- return [
365
- datasets.SplitGenerator(
366
- name=datasets.Split.TRAIN,
367
- gen_kwargs={"archive_path": archive_path["train_val_test"], "split": "train"},
368
- ),
369
- datasets.SplitGenerator(
370
- name=datasets.Split.VALIDATION,
371
- gen_kwargs={"archive_path": archive_path["train_val_test"], "split": "val"},
372
- ),
373
- datasets.SplitGenerator(
374
- name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path["test"], "split": "test"}
375
- ),
376
- ]
377
- elif self.config.name == "ic":
378
- archive_path = dl_manager.download_and_extract(self.config.data_url)
379
- return [
380
- datasets.SplitGenerator(
381
- name=datasets.Split.TRAIN,
382
- gen_kwargs={"archive_path": archive_path, "split": "train"},
383
- ),
384
- datasets.SplitGenerator(
385
- name=datasets.Split.VALIDATION,
386
- gen_kwargs={"archive_path": archive_path, "split": "valid"},
387
- ),
388
- datasets.SplitGenerator(
389
- name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": "test"}
390
- ),
391
- ]
392
- elif self.config.name == "si":
393
- manual_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
394
- return [
395
- datasets.SplitGenerator(
396
- name=datasets.Split.TRAIN,
397
- gen_kwargs={"archive_path": manual_dir, "split": 1},
398
- ),
399
- datasets.SplitGenerator(
400
- name=datasets.Split.VALIDATION,
401
- gen_kwargs={"archive_path": manual_dir, "split": 2},
402
- ),
403
- datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"archive_path": manual_dir, "split": 3}),
404
- ]
405
- elif self.config.name == "sd":
406
- splits = ["train", "dev", "test"]
407
- _DL_URLS = {
408
- split: {
409
- filename: self.config.data_url.format(split=split, filename=filename)
410
- for filename in ["reco2dur", "segments", "utt2spk", "wav.zip"]
411
- }
412
- for split in splits
413
- }
414
- archive_path = dl_manager.download_and_extract(_DL_URLS)
415
- return [
416
- datasets.SplitGenerator(
417
- name=datasets.NamedSplit(split), gen_kwargs={"archive_path": archive_path[split], "split": split}
418
- )
419
- for split in splits
420
- ]
421
- elif self.config.name == "er":
422
- manual_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
423
- return [
424
- datasets.SplitGenerator(
425
- name=f"session{i}",
426
- gen_kwargs={"archive_path": manual_dir, "split": i},
427
- )
428
- for i in range(1, 6)
429
- ]
430
-
431
- def _generate_examples(self, archive_path, split=None):
432
- """Generate examples."""
433
- if self.config.name == "asr":
434
- transcripts_glob = os.path.join(archive_path, "LibriSpeech", "*", "*", "*", "*.txt")
435
- key = 0
436
- for transcript_path in sorted(glob.glob(transcripts_glob)):
437
- transcript_dir_path = os.path.dirname(transcript_path)
438
- with open(transcript_path, "r", encoding="utf-8") as f:
439
- for line in f:
440
- line = line.strip()
441
- id_, transcript = line.split(" ", 1)
442
- audio_file = f"{id_}.flac"
443
- speaker_id, chapter_id = [int(el) for el in id_.split("-")[:2]]
444
- audio_path = os.path.join(transcript_dir_path, audio_file)
445
- yield key, {
446
- "id": id_,
447
- "speaker_id": speaker_id,
448
- "chapter_id": chapter_id,
449
- "file": audio_path,
450
- "audio": audio_path,
451
- "text": transcript,
452
- }
453
- key += 1
454
- elif self.config.name == "ks":
455
- words = ["yes", "no", "up", "down", "left", "right", "on", "off", "stop", "go"]
456
- splits = _split_ks_files(archive_path, split)
457
- for key, audio_file in enumerate(sorted(splits[split])):
458
- base_dir, file_name = os.path.split(audio_file)
459
- _, word = os.path.split(base_dir)
460
- if word in words:
461
- label = word
462
- elif word == "_silence_" or word == "_background_noise_":
463
- label = "_silence_"
464
- else:
465
- label = "_unknown_"
466
- yield key, {"file": audio_file, "audio": audio_file, "label": label}
467
- elif self.config.name == "ic":
468
- root_path = os.path.join(archive_path, "fluent_speech_commands_dataset")
469
- csv_path = os.path.join(root_path, "data", f"{split}_data.csv")
470
- with open(csv_path, encoding="utf-8") as csv_file:
471
- csv_reader = csv.reader(csv_file, delimiter=",", skipinitialspace=True)
472
- next(csv_reader)
473
- for row in csv_reader:
474
- key, file_path, speaker_id, text, action, object_, location = row
475
- audio_path = os.path.join(root_path, file_path)
476
- yield key, {
477
- "file": audio_path,
478
- "audio": audio_path,
479
- "speaker_id": speaker_id,
480
- "text": text,
481
- "action": action,
482
- "object": object_,
483
- "location": location,
484
- }
485
- elif self.config.name == "si":
486
- wav_path = os.path.join(archive_path, "wav")
487
- splits_path = os.path.join(archive_path, "veri_test_class.txt")
488
- with open(splits_path, "r", encoding="utf-8") as f:
489
- for key, line in enumerate(f):
490
- split_id, file_path = line.strip().split(" ")
491
- if int(split_id) != split:
492
- continue
493
- speaker_id = file_path.split("/")[0]
494
- audio_path = os.path.join(wav_path, file_path)
495
- yield key, {
496
- "file": audio_path,
497
- "audio": audio_path,
498
- "label": speaker_id,
499
- }
500
- elif self.config.name == "sd":
501
- data = SdData(archive_path)
502
- args = SdArgs()
503
- chunk_indices = _generate_chunk_indices(data, args, split=split)
504
- if split != "test":
505
- for key, (rec, st, ed) in enumerate(chunk_indices):
506
- speakers = _get_speakers(rec, data, args)
507
- yield key, {
508
- "record_id": rec,
509
- "file": data.wavs[rec],
510
- "audio": data.wavs[rec],
511
- "start": st,
512
- "end": ed,
513
- "speakers": speakers,
514
- }
515
- else:
516
- key = 0
517
- for rec in chunk_indices:
518
- for rec, st, ed in chunk_indices[rec]:
519
- speakers = _get_speakers(rec, data, args)
520
- yield key, {
521
- "record_id": rec,
522
- "file": data.wavs[rec],
523
- "audio": data.wavs[rec],
524
- "start": st,
525
- "end": ed,
526
- "speakers": speakers,
527
- }
528
- key += 1
529
- elif self.config.name == "er":
530
- root_path = os.path.join(archive_path, f"Session{split}")
531
- wav_path = os.path.join(root_path, "sentences", "wav")
532
- labels_path = os.path.join(root_path, "dialog", "EmoEvaluation", "*.txt")
533
- emotions = ["neu", "hap", "ang", "sad", "exc"]
534
- key = 0
535
- for labels_file in sorted(glob.glob(labels_path)):
536
- with open(labels_file, "r", encoding="utf-8") as f:
537
- for line in f:
538
- if line[0] != "[":
539
- continue
540
- _, filename, emo, _ = line.split("\t")
541
- if emo not in emotions:
542
- continue
543
- wav_subdir = filename.rsplit("_", 1)[0]
544
- filename = f"{filename}.wav"
545
- audio_path = os.path.join(wav_path, wav_subdir, filename)
546
- yield key, {
547
- "file": audio_path,
548
- "audio": audio_path,
549
- "label": emo.replace("exc", "hap"),
550
- }
551
- key += 1
552
-
553
-
554
- class SdData:
555
- def __init__(self, data_dir):
556
- """Load sd data."""
557
- self.segments = self._load_segments_rechash(data_dir["segments"])
558
- self.utt2spk = self._load_utt2spk(data_dir["utt2spk"])
559
- self.wavs = self._load_wav_zip(data_dir["wav.zip"])
560
- self.reco2dur = self._load_reco2dur(data_dir["reco2dur"])
561
-
562
- def _load_segments_rechash(self, segments_file):
563
- """Load segments file as dict with recid index."""
564
- ret = {}
565
- if not os.path.exists(segments_file):
566
- return None
567
- with open(segments_file, encoding="utf-8") as f:
568
- for line in f:
569
- utt, rec, st, et = line.strip().split()
570
- if rec not in ret:
571
- ret[rec] = []
572
- ret[rec].append({"utt": utt, "st": float(st), "et": float(et)})
573
- return ret
574
-
575
- def _load_wav_zip(self, wav_zip):
576
- """Return dictionary { rec: wav_rxfilename }."""
577
- wav_dir = os.path.join(wav_zip, "wav")
578
- return {
579
- os.path.splitext(filename)[0]: os.path.join(wav_dir, filename) for filename in sorted(os.listdir(wav_dir))
580
- }
581
-
582
- def _load_utt2spk(self, utt2spk_file):
583
- """Returns dictionary { uttid: spkid }."""
584
- with open(utt2spk_file, encoding="utf-8") as f:
585
- lines = [line.strip().split(None, 1) for line in f]
586
- return {x[0]: x[1] for x in lines}
587
-
588
- def _load_reco2dur(self, reco2dur_file):
589
- """Returns dictionary { recid: duration }."""
590
- if not os.path.exists(reco2dur_file):
591
- return None
592
- with open(reco2dur_file, encoding="utf-8") as f:
593
- lines = [line.strip().split(None, 1) for line in f]
594
- return {x[0]: float(x[1]) for x in lines}
595
-
596
-
597
- @dataclass
598
- class SdArgs:
599
- chunk_size: int = 2000
600
- frame_shift: int = 160
601
- subsampling: int = 1
602
- label_delay: int = 0
603
- num_speakers: int = 2
604
- rate: int = 16000
605
- use_last_samples: bool = True
606
-
607
-
608
- def _generate_chunk_indices(data, args, split=None):
609
- chunk_indices = [] if split != "test" else {}
610
- # make chunk indices: filepath, start_frame, end_frame
611
- for rec in data.wavs:
612
- data_len = int(data.reco2dur[rec] * args.rate / args.frame_shift)
613
- data_len = int(data_len / args.subsampling)
614
- if split == "test":
615
- chunk_indices[rec] = []
616
- if split != "test":
617
- for st, ed in _gen_frame_indices(
618
- data_len,
619
- args.chunk_size,
620
- args.chunk_size,
621
- args.use_last_samples,
622
- label_delay=args.label_delay,
623
- subsampling=args.subsampling,
624
- ):
625
- chunk_indices.append((rec, st * args.subsampling, ed * args.subsampling))
626
- else:
627
- for st, ed in _gen_chunk_indices(data_len, args.chunk_size):
628
- chunk_indices[rec].append((rec, st * args.subsampling, ed * args.subsampling))
629
- return chunk_indices
630
-
631
-
632
- def _count_frames(data_len, size, step):
633
- # no padding at edges, last remaining samples are ignored
634
- return int((data_len - size + step) / step)
635
-
636
-
637
- def _gen_frame_indices(data_length, size=2000, step=2000, use_last_samples=False, label_delay=0, subsampling=1):
638
- i = -1
639
- for i in range(_count_frames(data_length, size, step)):
640
- yield i * step, i * step + size
641
- if use_last_samples and i * step + size < data_length:
642
- if data_length - (i + 1) * step - subsampling * label_delay > 0:
643
- yield (i + 1) * step, data_length
644
-
645
-
646
- def _gen_chunk_indices(data_len, chunk_size):
647
- step = chunk_size
648
- start = 0
649
- while start < data_len:
650
- end = min(data_len, start + chunk_size)
651
- yield start, end
652
- start += step
653
-
654
-
655
- def _get_speakers(rec, data, args):
656
- return [
657
- {
658
- "speaker_id": data.utt2spk[segment["utt"]],
659
- "start": round(segment["st"] * args.rate / args.frame_shift),
660
- "end": round(segment["et"] * args.rate / args.frame_shift),
661
- }
662
- for segment in data.segments[rec]
663
- ]
664
-
665
-
666
- def _split_ks_files(archive_path, split):
667
- audio_path = os.path.join(archive_path, "**", "*.wav")
668
- audio_paths = glob.glob(audio_path)
669
- if split == "test":
670
- # use all available files for the test archive
671
- return {"test": audio_paths}
672
-
673
- val_list_file = os.path.join(archive_path, "validation_list.txt")
674
- test_list_file = os.path.join(archive_path, "testing_list.txt")
675
- with open(val_list_file, encoding="utf-8") as f:
676
- val_paths = f.read().strip().splitlines()
677
- val_paths = [os.path.join(archive_path, p) for p in val_paths]
678
- with open(test_list_file, encoding="utf-8") as f:
679
- test_paths = f.read().strip().splitlines()
680
- test_paths = [os.path.join(archive_path, p) for p in test_paths]
681
-
682
- # the paths for the train set is just whichever paths that do not exist in
683
- # either the test or validation splits
684
- train_paths = list(set(audio_paths) - set(val_paths) - set(test_paths))
685
-
686
- return {"train": train_paths, "val": val_paths}
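
The deleted script's description told users to decode audio themselves with `soundfile` inside `.map()`. After this conversion, the `audio` column declared in the YAML (with `sampling_rate: 16000`) is decoded by `datasets` on access, so that step disappears. A small sketch of the replacement workflow, again assuming the `superb` repo id:

```python
from datasets import Audio, load_dataset

# Emotion-recognition config; split names follow the new session1..session5 splits.
er = load_dataset("superb", "er", split="session1")

sample = er[0]
waveform = sample["audio"]["array"]      # decoded samples as a numpy array
rate = sample["audio"]["sampling_rate"]  # 16000 per the dataset_info

# Resampling, if needed, is a cast rather than a manual map:
er = er.cast_column("audio", Audio(sampling_rate=8_000))
```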