---
configs:
- config_name: default
  data_files:
  - split: test.other
    path: data/test.other-*
  - split: validation.other
    path: data/validation.other-*
  - split: train.other.500
    path: data/train.other.500-*
  - split: train.clean.100
    path: data/train.clean.100-*
  - split: test.clean
    path: data/test.clean-*
  - split: train.clean.360
    path: data/train.clean.360-*
  - split: validation.clean
    path: data/validation.clean-*
dataset_info:
  features:
  - name: text
    dtype: string
  - name: id
    dtype: string
  - name: audio_codes
    sequence:
      sequence: int64
  splits:
  - name: test.other
    num_bytes: 62049899
    num_examples: 2939
  - name: validation.other
    num_bytes: 59498714
    num_examples: 2864
  - name: train.other.500
    num_bytes: 5761561617
    num_examples: 148688
  - name: train.clean.100
    num_bytes: 1166450829
    num_examples: 28539
  - name: test.clean
    num_bytes: 62745230
    num_examples: 2620
  - name: train.clean.360
    num_bytes: 4216515060
    num_examples: 104014
  - name: validation.clean
    num_bytes: 62578176
    num_examples: 2703
  download_size: 1801683161
  dataset_size: 11391399525
---
# Dataset Card for "speech_tokenizer_16k"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
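
## Usage

A minimal sketch of loading one split with the `datasets` library. The repository id below is a placeholder, not confirmed by this card; substitute the actual namespace hosting `speech_tokenizer_16k`. Field names (`text`, `id`, `audio_codes`) and split names come from the YAML metadata above.

```python
from datasets import load_dataset

# Placeholder repo id; replace "<namespace>" with the actual owner of this dataset.
ds = load_dataset("<namespace>/speech_tokenizer_16k", split="test.clean")

# Each example has a transcript, an utterance id, and a nested sequence of
# int64 audio codes (a list of lists, per the `sequence: sequence: int64` feature).
example = ds[0]
print(example["id"])
print(example["text"])
print(len(example["audio_codes"]))
```

Other splits (`test.other`, `validation.clean`, `validation.other`, `train.clean.100`, `train.clean.360`, `train.other.500`) can be loaded the same way by changing the `split` argument.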