---
dataset_info:
- config_name: metadata
  features:
  - name: _id
    dtype: string
  - name: id
    dtype: string
  - name: created_at
    dtype: timestamp[us, tz=UTC]
  - name: downloads_all_time
    dtype: int64
  - name: downloads
    dtype: int64
  - name: likes
    dtype: int64
  - name: trending_score
    dtype: float64
  - name: tokenizer_id
    dtype: int64
  - name: hash
    dtype: string
  splits:
  - name: batch_0
    num_bytes: 13156
    num_examples: 100
  - name: batch_1
    num_bytes: 12911
    num_examples: 100
  - name: batch_2
    num_bytes: 13009
    num_examples: 100
  - name: batch_3
    num_bytes: 13092
    num_examples: 100
  - name: batch_4
    num_bytes: 13282
    num_examples: 100
  download_size: 65425
  dataset_size: 65450
- config_name: tokenizers
  features:
  - name: tokenizer_id
    dtype: int64
  - name: hash
    dtype: string
  - name: vocab_hash
    dtype: string
  - name: vocab_size
    dtype: int64
  - name: model_type
    dtype: string
  - name: num_merges
    dtype: int64
  - name: has_normalizer
    dtype: bool
  - name: has_pre_tokenizer
    dtype: bool
  - name: has_post_processor
    dtype: bool
  - name: has_decoder
    dtype: bool
  - name: num_added_tokens
    dtype: int64
  - name: normalizer_type
    dtype: string
  - name: pre_tokenizer_type
    dtype: string
  - name: decoder_type
    dtype: string
  - name: normalizer_types
    list: string
  - name: pre_tokenizer_types
    list: string
  - name: decoder_types
    list: string
  - name: version
    dtype: string
  - name: added_tokens
    dtype: string
  - name: normalizer
    dtype: string
  - name: pre_tokenizer
    dtype: string
  - name: post_processor
    dtype: string
  - name: decoder
    dtype: string
  - name: model
    dtype: string
  splits:
  - name: train
    num_bytes: 640975277
    num_examples: 195
  download_size: 359764431
  dataset_size: 640975277
configs:
- config_name: metadata
  data_files:
  - split: batch_0
    path: metadata/batch_0-*
  - split: batch_1
    path: metadata/batch_1-*
  - split: batch_2
    path: metadata/batch_2-*
  - split: batch_3
    path: metadata/batch_3-*
  - split: batch_4
    path: metadata/batch_4-*
- config_name: tokenizers
  data_files:
  - split: train
    path: tokenizers/train-*
---
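
The YAML above defines two configs: `metadata`, split into five 100-row batches, and `tokenizers`, a single 195-row `train` split carrying serialized tokenizer components. A minimal loading sketch with the 🤗 `datasets` library follows; the repository id is a placeholder (this card does not name the repo), and the join between the two configs via the shared `tokenizer_id`/`hash` columns is an assumption inferred from the schema.

```python
from datasets import load_dataset

# Placeholder repo id: substitute the dataset's actual Hub path.
REPO_ID = "<org>/<dataset-name>"

# `metadata` config: five splits named batch_0 ... batch_4, 100 rows each.
meta = load_dataset(REPO_ID, "metadata", split="batch_0")

# `tokenizers` config: one `train` split with the serialized tokenizer
# fields (normalizer, pre_tokenizer, post_processor, decoder, model, ...).
tok = load_dataset(REPO_ID, "tokenizers", split="train")

print(meta.column_names)
print(tok[0]["model_type"], tok[0]["vocab_size"])

# Assumption: both configs expose `tokenizer_id` and `hash`, which appear
# to act as the key linking metadata rows to their tokenizer rows.
by_id = {row["tokenizer_id"]: row for row in tok}
first = by_id.get(meta[0]["tokenizer_id"])
```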