{
  "total_tokens_processed": 61085876,
  "total_bytes_processed": 221988643,
  "total_time": 18.71514630317688,
  "input": "../datasets/en-ja_valid.jsonl",
  "json_keys": [
    "text"
  ],
  "split_sentences": false,
  "keep_newlines": false,
  "tokenizer_type": "HFPreTrainedTokenizer",
  "tokenizer_model": "/share/pretrained_lm/llm-jp/llm-jp-v3-3.7b",
  "vocab_file": null,
  "vocab_size": 786,
  "merge_file": null,
  "append_eod": true,
  "lang": "english",
  "output_prefix": "../datasets/valid/documents",
  "workers": 32,
  "partitions": 1,
  "log_interval": 1000,
  "keep_sequential_samples": false,
  "remove_bos": false,
  "keep_empty": false,
  "rank": 1,
  "make_vocab_size_divisible_by": 128,
  "tensor_model_parallel_size": 1,
  "vocab_extra_ids": 0,
  "padded_vocab_size": 99584
}