Update README.md
#2
by zichenwen - opened
README.md
CHANGED
|
@@ -7,7 +7,8 @@ task_categories:
|
|
| 7 |
language:
|
| 8 |
- en
|
| 9 |
tags:
|
| 10 |
-
-
|
|
|
|
| 11 |
modalities:
|
| 12 |
- audio
|
| 13 |
- text
|
|
@@ -148,7 +149,7 @@ librispeech-long/
|
|
| 148 |
<speaker>-<chapter>.trans.txt
|
| 149 |
```
|
| 150 |
|
| 151 |
-
**Citation**:
|
| 152 |
```bibtex
|
| 153 |
@inproceedings{kahn2020libri,
|
| 154 |
title={Libri-light: A benchmark for {ASR} with limited or no supervision},
|
|
@@ -156,7 +157,7 @@ librispeech-long/
|
|
| 156 |
booktitle={ICASSP 2020},
|
| 157 |
year={2020}
|
| 158 |
}
|
| 159 |
-
```
|
| 160 |
|
| 161 |
---
|
| 162 |
|
|
@@ -180,7 +181,7 @@ librispeech-long/
|
|
| 180 |
}
|
| 181 |
```
|
| 182 |
|
| 183 |
-
**Citation**:
|
| 184 |
```bibtex
|
| 185 |
@inproceedings{lai2017race,
|
| 186 |
title={{RACE}: Large-scale reading comprehension dataset from examinations},
|
|
@@ -188,7 +189,7 @@ librispeech-long/
|
|
| 188 |
booktitle={EMNLP},
|
| 189 |
year={2017}
|
| 190 |
}
|
| 191 |
-
```
|
| 192 |
|
| 193 |
---
|
| 194 |
|
|
@@ -222,7 +223,7 @@ librispeech-long/
|
|
| 222 |
**Source**: https://www.kaggle.com/datasets/andradaolteanu/gtzan-dataset-music-genre-classification
|
| 223 |
|
| 224 |
|
| 225 |
-
**Citation**:
|
| 226 |
```bibtex
|
| 227 |
@article{tzanetakis2002musical,
|
| 228 |
title={Musical genre classification of audio signals},
|
|
@@ -230,7 +231,7 @@ librispeech-long/
|
|
| 230 |
journal={IEEE Transactions on Speech and Audio Processing},
|
| 231 |
year={2002}
|
| 232 |
}
|
| 233 |
-
```
|
| 234 |
|
| 235 |
---
|
| 236 |
|
|
@@ -247,7 +248,7 @@ librispeech-long/
|
|
| 247 |
- `LICENSE`: Original license text
|
| 248 |
- `concatenated_resampled/`: Resampled audio files
|
| 249 |
|
| 250 |
-
**Citation**:
|
| 251 |
```bibtex
|
| 252 |
@article{mesaros2018detection,
|
| 253 |
title={Detection and classification of acoustic scenes and events: Outcome of the {DCASE} 2016 challenge},
|
|
@@ -255,7 +256,7 @@ librispeech-long/
|
|
| 255 |
journal={IEEE/ACM TASLP},
|
| 256 |
year={2018}
|
| 257 |
}
|
| 258 |
-
```
|
| 259 |
|
| 260 |
---
|
| 261 |
|
|
@@ -268,7 +269,7 @@ librispeech-long/
|
|
| 268 |
**⚠️ License**: Academic Use Only (access by request)
|
| 269 |
**Source**: https://engineering.jhu.edu/nsa/vesus/
|
| 270 |
|
| 271 |
-
**Citation**:
|
| 272 |
```bibtex
|
| 273 |
@inproceedings{sager2019vesus,
|
| 274 |
title={{VESUS}: A crowd-annotated database to study emotion production and perception in spoken {English}},
|
|
@@ -276,7 +277,7 @@ librispeech-long/
|
|
| 276 |
booktitle={Interspeech},
|
| 277 |
year={2019}
|
| 278 |
}
|
| 279 |
-
```
|
| 280 |
|
| 281 |
---
|
| 282 |
|
|
@@ -299,7 +300,7 @@ librispeech-long/
|
|
| 299 |
}
|
| 300 |
```
|
| 301 |
|
| 302 |
-
**Citation**:
|
| 303 |
```bibtex
|
| 304 |
@inproceedings{shon2022slue,
|
| 305 |
title={{SLUE}: New benchmark tasks for spoken language understanding evaluation on natural speech},
|
|
@@ -307,7 +308,7 @@ librispeech-long/
|
|
| 307 |
booktitle={ICASSP 2022},
|
| 308 |
year={2022}
|
| 309 |
}
|
| 310 |
-
```
|
| 311 |
|
| 312 |
---
|
| 313 |
|
|
@@ -325,7 +326,7 @@ librispeech-long/
|
|
| 325 |
- Must maintain attribution when redistributing
|
| 326 |
- See `license_public_eval.tsv` for per-file credits
|
| 327 |
|
| 328 |
-
**Citation**:
|
| 329 |
```bibtex
|
| 330 |
@inproceedings{turpault2019sound,
|
| 331 |
title={Sound event detection in domestic environments with weakly labeled data and soundscape synthesis},
|
|
@@ -333,7 +334,7 @@ librispeech-long/
|
|
| 333 |
booktitle={DCASE Workshop},
|
| 334 |
year={2019}
|
| 335 |
}
|
| 336 |
-
```
|
| 337 |
|
| 338 |
---
|
| 339 |
|
|
@@ -367,7 +368,7 @@ librispeech-long/
|
|
| 367 |
|
| 368 |
**Note**: Age/gender labels are derivative annotations on VoxCeleb corpus
|
| 369 |
|
| 370 |
-
**Citation**:
|
| 371 |
```bibtex
|
| 372 |
@inproceedings{nagrani2017voxceleb,
|
| 373 |
title={{VoxCeleb}: a large-scale speaker identification dataset},
|
|
@@ -375,7 +376,7 @@ librispeech-long/
|
|
| 375 |
booktitle={Interspeech},
|
| 376 |
year={2017}
|
| 377 |
}
|
| 378 |
-
```
|
| 379 |
|
| 380 |
---
|
| 381 |
|
|
|
|
| 7 |
language:
|
| 8 |
- en
|
| 9 |
tags:
|
| 10 |
+
- Audio
|
| 11 |
+
- Multi-modal Large Language Models
|
| 12 |
modalities:
|
| 13 |
- audio
|
| 14 |
- text
|
|
|
|
| 149 |
<speaker>-<chapter>.trans.txt
|
| 150 |
```
|
| 151 |
|
| 152 |
+
<!-- **Citation**:
|
| 153 |
```bibtex
|
| 154 |
@inproceedings{kahn2020libri,
|
| 155 |
title={Libri-light: A benchmark for {ASR} with limited or no supervision},
|
|
|
|
| 157 |
booktitle={ICASSP 2020},
|
| 158 |
year={2020}
|
| 159 |
}
|
| 160 |
+
``` -->
|
| 161 |
|
| 162 |
---
|
| 163 |
|
|
|
|
| 181 |
}
|
| 182 |
```
|
| 183 |
|
| 184 |
+
<!-- **Citation**:
|
| 185 |
```bibtex
|
| 186 |
@inproceedings{lai2017race,
|
| 187 |
title={{RACE}: Large-scale reading comprehension dataset from examinations},
|
|
|
|
| 189 |
booktitle={EMNLP},
|
| 190 |
year={2017}
|
| 191 |
}
|
| 192 |
+
``` -->
|
| 193 |
|
| 194 |
---
|
| 195 |
|
|
|
|
| 223 |
**Source**: https://www.kaggle.com/datasets/andradaolteanu/gtzan-dataset-music-genre-classification
|
| 224 |
|
| 225 |
|
| 226 |
+
<!-- **Citation**:
|
| 227 |
```bibtex
|
| 228 |
@article{tzanetakis2002musical,
|
| 229 |
title={Musical genre classification of audio signals},
|
|
|
|
| 231 |
journal={IEEE Transactions on Speech and Audio Processing},
|
| 232 |
year={2002}
|
| 233 |
}
|
| 234 |
+
``` -->
|
| 235 |
|
| 236 |
---
|
| 237 |
|
|
|
|
| 248 |
- `LICENSE`: Original license text
|
| 249 |
- `concatenated_resampled/`: Resampled audio files
|
| 250 |
|
| 251 |
+
<!-- **Citation**:
|
| 252 |
```bibtex
|
| 253 |
@article{mesaros2018detection,
|
| 254 |
title={Detection and classification of acoustic scenes and events: Outcome of the {DCASE} 2016 challenge},
|
|
|
|
| 256 |
journal={IEEE/ACM TASLP},
|
| 257 |
year={2018}
|
| 258 |
}
|
| 259 |
+
``` -->
|
| 260 |
|
| 261 |
---
|
| 262 |
|
|
|
|
| 269 |
**⚠️ License**: Academic Use Only (access by request)
|
| 270 |
**Source**: https://engineering.jhu.edu/nsa/vesus/
|
| 271 |
|
| 272 |
+
<!-- **Citation**:
|
| 273 |
```bibtex
|
| 274 |
@inproceedings{sager2019vesus,
|
| 275 |
title={{VESUS}: A crowd-annotated database to study emotion production and perception in spoken {English}},
|
|
|
|
| 277 |
booktitle={Interspeech},
|
| 278 |
year={2019}
|
| 279 |
}
|
| 280 |
+
``` -->
|
| 281 |
|
| 282 |
---
|
| 283 |
|
|
|
|
| 300 |
}
|
| 301 |
```
|
| 302 |
|
| 303 |
+
<!-- **Citation**:
|
| 304 |
```bibtex
|
| 305 |
@inproceedings{shon2022slue,
|
| 306 |
title={{SLUE}: New benchmark tasks for spoken language understanding evaluation on natural speech},
|
|
|
|
| 308 |
booktitle={ICASSP 2022},
|
| 309 |
year={2022}
|
| 310 |
}
|
| 311 |
+
``` -->
|
| 312 |
|
| 313 |
---
|
| 314 |
|
|
|
|
| 326 |
- Must maintain attribution when redistributing
|
| 327 |
- See `license_public_eval.tsv` for per-file credits
|
| 328 |
|
| 329 |
+
<!-- **Citation**:
|
| 330 |
```bibtex
|
| 331 |
@inproceedings{turpault2019sound,
|
| 332 |
title={Sound event detection in domestic environments with weakly labeled data and soundscape synthesis},
|
|
|
|
| 334 |
booktitle={DCASE Workshop},
|
| 335 |
year={2019}
|
| 336 |
}
|
| 337 |
+
``` -->
|
| 338 |
|
| 339 |
---
|
| 340 |
|
|
|
|
| 368 |
|
| 369 |
**Note**: Age/gender labels are derivative annotations on VoxCeleb corpus
|
| 370 |
|
| 371 |
+
<!-- **Citation**:
|
| 372 |
```bibtex
|
| 373 |
@inproceedings{nagrani2017voxceleb,
|
| 374 |
title={{VoxCeleb}: a large-scale speaker identification dataset},
|
|
|
|
| 376 |
booktitle={Interspeech},
|
| 377 |
year={2017}
|
| 378 |
}
|
| 379 |
+
``` -->
|
| 380 |
|
| 381 |
---
|
| 382 |
|