Stanwang1210's picture
Upload README.md with huggingface_hub
6b74b01
---
# Maps each named split to the glob pattern of its data files in the repo.
configs:
  - config_name: default
    data_files:
      - split: test.other
        path: data/test.other-*
      - split: validation.other
        path: data/validation.other-*
      - split: train.other.500
        path: data/train.other.500-*
      - split: train.clean.100
        path: data/train.clean.100-*
      - split: test.clean
        path: data/test.clean-*
      - split: train.clean.360
        path: data/train.clean.360-*
      - split: validation.clean
        path: data/validation.clean-*
# Schema and per-split statistics (auto-generated on upload).
dataset_info:
  features:
    - name: text
      dtype: string
    - name: id
      dtype: string
    # Nested sequence of int64 — presumably tokenized audio codes per
    # example; exact shape/semantics not visible here.
    - name: audio_codes
      sequence:
        sequence: int64
  splits:
    - name: test.other
      num_bytes: 62049899
      num_examples: 2939
    - name: validation.other
      num_bytes: 59498714
      num_examples: 2864
    - name: train.other.500
      num_bytes: 5761561617
      num_examples: 148688
    - name: train.clean.100
      num_bytes: 1166450829
      num_examples: 28539
    - name: test.clean
      num_bytes: 62745230
      num_examples: 2620
    - name: train.clean.360
      num_bytes: 4216515060
      num_examples: 104014
    - name: validation.clean
      num_bytes: 62578176
      num_examples: 2703
  # download_size is compressed size; dataset_size is the sum of the
  # per-split num_bytes above.
  download_size: 1801683161
  dataset_size: 11391399525
---

# Dataset Card for "speech_tokenizer_16k"

More Information needed