---
dataset_info:
  features:
  - name: content
    dtype: string
  - name: input_ids
    sequence: int32
  - name: attention_mask
    sequence: int8
  splits:
  - name: train
    num_bytes: 8807610
    num_examples: 1258
  download_size: 2629871
  dataset_size: 8807610
configs:
- config_name: default
  data_files:
  - split: train
    path: data/oz/train-*
---
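
The metadata above declares a single `train` split (1,258 examples, ~8.8 MB) where each record carries the raw `content` string alongside pre-tokenized `input_ids` (int32) and `attention_mask` (int8) sequences. A minimal loading sketch, assuming the `datasets` library is installed; the repo id shown is a placeholder and should be replaced with this dataset's actual Hub path:

```python
from datasets import load_dataset

# Hypothetical repo id -- substitute the dataset's real Hub path.
ds = load_dataset("TrevorDohm/Stack_Tokenized", split="train")

example = ds[0]
print(example["content"][:80])         # raw source text
print(len(example["input_ids"]))       # pre-tokenized token ids (int32)
print(len(example["attention_mask"]))  # attention mask (int8), same length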