---
dataset_info:
  features:
  - name: _id
    dtype: large_string
  - name: id
    dtype: large_string
  - name: created_at
    dtype: timestamp[us, tz=UTC]
  - name: downloads_all_time
    dtype: int64
  - name: downloads
    dtype: int64
  - name: likes
    dtype: int64
  - name: trending_score
    dtype: float64
  - name: hash
    dtype: large_string
  - name: vocab_hash
    dtype: large_string
  - name: vocab_size
    dtype: int64
  - name: model_type
    dtype: large_string
  - name: num_merges
    dtype: float64
  - name: has_normalizer
    dtype: bool
  - name: has_pre_tokenizer
    dtype: bool
  - name: has_post_processor
    dtype: bool
  - name: has_decoder
    dtype: bool
  - name: num_added_tokens
    dtype: int64
  - name: normalizer_type
    dtype: large_string
  - name: pre_tokenizer_type
    dtype: large_string
  - name: decoder_type
    dtype: large_string
  - name: normalizer_types
    list: string
  - name: pre_tokenizer_types
    list: string
  - name: decoder_types
    list: string
  - name: tokenizer_id
    dtype: int64
  splits:
  - name: train
    num_bytes: 135916939
    num_examples: 423650
  download_size: 50377312
  dataset_size: 135916939
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
---

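The metadata above describes a single `default` config with one `train` split whose schema matches the feature list. A minimal sketch of loading and inspecting it with the `datasets` library is shown below; the repository path is a placeholder, since the card does not state the dataset's Hugging Face id.

```python
from datasets import load_dataset

# Placeholder repo id -- substitute this dataset's actual Hugging Face path.
ds = load_dataset("<user>/<dataset-name>", split="train")

# The features listed in the card map onto ds.features, e.g.
#   ds.features["created_at"]       -> Value("timestamp[us, tz=UTC]")
#   ds.features["normalizer_types"] -> a list of string values
print(ds.features)

# Inspect one tokenizer record.
print(ds[0])
```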