---
dataset_info:
  features:
  - name: token
    dtype: large_string
  - name: tokenizer_count
    dtype: int64
  splits:
  - name: train
    num_bytes: 870683594
    num_examples: 26019341
  download_size: 319616590
  dataset_size: 870683594
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*