---
dataset_info:
- config_name: metadata
  features:
  - name: _id
    dtype: string
  - name: id
    dtype: string
  - name: created_at
    dtype: timestamp[us, tz=UTC]
  - name: downloads_all_time
    dtype: int64
  - name: downloads
    dtype: int64
  - name: likes
    dtype: int64
  - name: trending_score
    dtype: float64
  - name: tokenizer_id
    dtype: int64
  - name: hash
    dtype: string
  splits:
  - name: batch_0
    num_bytes: 13156
    num_examples: 100
  - name: batch_1
    num_bytes: 12911
    num_examples: 100
  - name: batch_2
    num_bytes: 13009
    num_examples: 100
  - name: batch_3
    num_bytes: 13092
    num_examples: 100
  - name: batch_4
    num_bytes: 13282
    num_examples: 100
  download_size: 65425
  dataset_size: 65450
- config_name: tokenizers
  features:
  - name: tokenizer_id
    dtype: int64
  - name: hash
    dtype: string
  - name: vocab_hash
    dtype: string
  - name: vocab_size
    dtype: int64
  - name: model_type
    dtype: string
  - name: num_merges
    dtype: int64
  - name: has_normalizer
    dtype: bool
  - name: has_pre_tokenizer
    dtype: bool
  - name: has_post_processor
    dtype: bool
  - name: has_decoder
    dtype: bool
  - name: num_added_tokens
    dtype: int64
  - name: normalizer_type
    dtype: string
  - name: pre_tokenizer_type
    dtype: string
  - name: decoder_type
    dtype: string
  - name: normalizer_types
    list: string
  - name: pre_tokenizer_types
    list: string
  - name: decoder_types
    list: string
  - name: version
    dtype: string
  - name: added_tokens
    dtype: string
  - name: normalizer
    dtype: string
  - name: pre_tokenizer
    dtype: string
  - name: post_processor
    dtype: string
  - name: decoder
    dtype: string
  - name: model
    dtype: string
  splits:
  - name: train
    num_bytes: 640975277
    num_examples: 195
  download_size: 359764431
  dataset_size: 640975277
configs:
- config_name: metadata
  data_files:
  - split: batch_0
    path: metadata/batch_0-*
  - split: batch_1
    path: metadata/batch_1-*
  - split: batch_2
    path: metadata/batch_2-*
  - split: batch_3
    path: metadata/batch_3-*
  - split: batch_4
    path: metadata/batch_4-*
- config_name: tokenizers
  data_files:
  - split: train
    path: tokenizers/train-*
---
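
The front matter above defines two configurations: `metadata` (Hub statistics keyed to tokenizers, stored as five 100-row batch splits) and `tokenizers` (195 serialized tokenizer records in a single `train` split). A minimal loading sketch with the `datasets` library is shown below; the repository ID is a placeholder assumption, substitute this dataset's actual path.

```python
from datasets import load_dataset

# NOTE: placeholder repo ID (assumption); replace with this dataset's actual path.
REPO_ID = "user/dataset-name"

# "metadata" config: five 100-row splits named batch_0 .. batch_4.
batch_0 = load_dataset(REPO_ID, "metadata", split="batch_0")
print(batch_0.column_names)  # ['_id', 'id', 'created_at', 'downloads_all_time', ...]

# "tokenizers" config: a single "train" split with 195 tokenizer records.
tokenizers = load_dataset(REPO_ID, "tokenizers", split="train")
print(len(tokenizers))  # 195
```

Since both configurations carry `tokenizer_id` and `hash` columns, the metadata batches can presumably be joined against the `tokenizers` table on either key.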