---
configs:
- config_name: default
  data_files:
  - split: academicodec_hifi_16k_320d
    path: data/academicodec_hifi_16k_320d-*
  - split: academicodec_hifi_16k_320d_large_uni
    path: data/academicodec_hifi_16k_320d_large_uni-*
  - split: academicodec_hifi_24k_320d
    path: data/academicodec_hifi_24k_320d-*
  - split: audiodec_24k_320d
    path: data/audiodec_24k_320d-*
  - split: dac_16k
    path: data/dac_16k-*
  - split: dac_24k
    path: data/dac_24k-*
  - split: dac_44k
    path: data/dac_44k-*
  - split: encodec_24k
    path: data/encodec_24k-*
  - split: funcodec_en_libritts_16k_gr1nq32ds320
    path: data/funcodec_en_libritts_16k_gr1nq32ds320-*
  - split: funcodec_en_libritts_16k_gr8nq32ds320
    path: data/funcodec_en_libritts_16k_gr8nq32ds320-*
  - split: funcodec_en_libritts_16k_nq32ds320
    path: data/funcodec_en_libritts_16k_nq32ds320-*
  - split: funcodec_en_libritts_16k_nq32ds640
    path: data/funcodec_en_libritts_16k_nq32ds640-*
  - split: funcodec_zh_en_16k_nq32ds320
    path: data/funcodec_zh_en_16k_nq32ds320-*
  - split: funcodec_zh_en_16k_nq32ds640
    path: data/funcodec_zh_en_16k_nq32ds640-*
  - split: speech_tokenizer_16k
    path: data/speech_tokenizer_16k-*
dataset_info:
  features:
  - name: id
    dtype: string
  - name: unit
    sequence:
      sequence: int64
  splits:
  - name: academicodec_hifi_16k_320d
    num_bytes: 45943560
    num_examples: 5720
  - name: academicodec_hifi_16k_320d_large_uni
    num_bytes: 45943560
    num_examples: 5720
  - name: academicodec_hifi_24k_320d
    num_bytes: 68823560
    num_examples: 5720
  - name: audiodec_24k_320d
    num_bytes: 146707080
    num_examples: 5720
  - name: dac_16k
    num_bytes: 280417800
    num_examples: 5720
  - name: dac_24k
    num_bytes: 778378120
    num_examples: 5720
  - name: dac_44k
    num_bytes: 237517800
    num_examples: 5720
  - name: encodec_24k
    num_bytes: 34549320
    num_examples: 5720
  - name: funcodec_en_libritts_16k_gr1nq32ds320
    num_bytes: 368368520
    num_examples: 5720
  - name: funcodec_en_libritts_16k_gr8nq32ds320
    num_bytes: 368368520
    num_examples: 5720
  - name: funcodec_en_libritts_16k_nq32ds320
    num_bytes: 366904200
    num_examples: 5720
  - name: funcodec_en_libritts_16k_nq32ds640
    num_bytes: 183864200
    num_examples: 5720
  - name: funcodec_zh_en_16k_nq32ds320
    num_bytes: 366904200
    num_examples: 5720
  - name: funcodec_zh_en_16k_nq32ds640
    num_bytes: 366904200
    num_examples: 5720
  - name: speech_tokenizer_16k
    num_bytes: 91795080
    num_examples: 5720
  download_size: 593516002
  dataset_size: 3751389720
---

# Dataset Card for "libricount_extract_unit"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)