# metadata
---
dataset_info:
  features:
    - name: token
      dtype: large_string
    - name: tokenizer_count
      dtype: int64
  splits:
    - name: train
      num_bytes: 109255121
      num_examples: 3728832
  download_size: 40098044
  dataset_size: 109255121
configs:
  - config_name: default
    data_files:
      - split: train
        path: data/train-*