Upload folder using huggingface_hub
- .gitattributes +1 -0
- SEDDcoder_tokenizer/.ipynb_checkpoints/tokenizer——trainer-checkpoint.ipynb +1320 -0
- SEDDcoder_tokenizer/merges.txt +0 -0
- SEDDcoder_tokenizer/special_tokens_map.json +12 -0
- SEDDcoder_tokenizer/tokenizer.json +0 -0
- SEDDcoder_tokenizer/tokenizer_config.json +67 -0
- SEDDcoder_tokenizer/tokenizer_train_data_5gb/swallow_code.txt +3 -0
- SEDDcoder_tokenizer/tokenizer——trainer.ipynb +1320 -0
- SEDDcoder_tokenizer/vocab.json +0 -0
- best.ckpt +3 -0
- config.yaml +109 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+SEDDcoder_tokenizer/tokenizer_train_data_5gb/swallow_code.txt filter=lfs diff=lfs merge=lfs -text
SEDDcoder_tokenizer/.ipynb_checkpoints/tokenizer——trainer-checkpoint.ipynb ADDED
@@ -0,0 +1,1320 @@
Cell 1 (code, execution_count 1, id 5d3c5e9a-91a4-49b4-bbff-3349bf81f2ec)

source:
pip install datasets tokenizers transformers tqdm

output (stdout, abridged; dependency-resolution and progress-bar lines omitted):
Collecting datasets
  Downloading datasets-4.4.1-py3-none-any.whl.metadata (19 kB)
Collecting tokenizers
  Downloading tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (6.8 kB)
Collecting transformers
  Downloading transformers-4.57.3-py3-none-any.whl.metadata (43 kB)
Collecting tqdm
  Using cached tqdm-4.67.1-py3-none-any.whl.metadata (57 kB)
Installing collected packages: pytz, xxhash, tzdata, tqdm, safetensors, regex, pyarrow, propcache, multidict, hf-xet, frozenlist, dill, aiohappyeyeballs, yarl, pandas, multiprocess, huggingface-hub, aiosignal, tokenizers, aiohttp, transformers, datasets
Successfully installed aiohappyeyeballs-2.6.1 aiohttp-3.13.2 aiosignal-1.4.0 datasets-4.4.1 dill-0.4.0 frozenlist-1.8.0 hf-xet-1.2.0 huggingface-hub-0.36.0 multidict-6.7.0 multiprocess-0.70.18 pandas-2.3.3 propcache-0.4.1 pyarrow-22.0.0 pytz-2025.2 regex-2025.11.3 safetensors-0.7.0 tokenizers-0.22.1 tqdm-4.67.1 transformers-4.57.3 tzdata-2025.2 xxhash-3.6.0 yarl-1.22.0
Note: you may need to restart the kernel to use updated packages.
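Cell 1 pins no versions, so a later rerun may resolve different releases than the ones logged above. A small optional sketch for recording what actually got installed, using only the standard library (the package names simply mirror the install command):

from importlib.metadata import version

# Log the resolved versions so a rerun of the notebook is comparable
# with the install log above.
for pkg in ["datasets", "tokenizers", "transformers", "tqdm"]:
    print(pkg, version(pkg))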
Cell 2 (code, execution_count 3, id 40d6aead-8bb3-4e91-aff2-f76491a4d768)

output (stderr):
`trust_remote_code` is not supported anymore.
Please check that the Hugging Face dataset 'tokyotech-llm/swallow-code' isn't based on a loading script and remove `trust_remote_code`.
If the dataset is based on a loading script, please ask the dataset author to remove it and convert it to a standard format like Parquet.
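The warning above comes from newer `datasets` releases, which no longer support script-based dataset loading. A minimal sketch of the updated call, assuming the dataset ships in a standard format such as Parquet so the argument can simply be dropped:

from datasets import load_dataset

# Same streaming load as in the source below, minus the retired argument;
# `trust_remote_code` was only meaningful for script-based datasets and
# now just triggers the warning shown above.
ds = load_dataset(
    "tokyotech-llm/swallow-code",
    name="exp11-scor",   # subset, as configured in DATASET_CONFIG below
    split="train",
    streaming=True,
)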
output (stdout):
=== Starting sampling | target total size: 5 GB ===

 Downloading: swallow_code

output (display_data, Jupyter widget d4c7dcab1bb0400183b51dfcc1635fbc):
README.md: 0.00B [00:00, ?B/s]

output (stderr):
  0%|          | 3.61M/3.76G [00:10<2:04:51, 501kB/s]

output (error): KeyboardInterrupt
Traceback (most recent call last):
  Cell In[3], line 186
    183  print(f"Vocab size: {fast_tokenizer.vocab_size}")
    185  if __name__ == "__main__":
--> 186      files = sample_data()
    187      train_coder_tokenizer(files)
  Cell In[3], line 109, in sample_data()
    106  pbar = tqdm(total=target_bytes, unit='B', unit_scale=True)
    108  with open(output_file, "w", encoding="utf-8") as f:
--> 109      for sample in ds_shuffled:
    110          try:
    111              text = extract_text_from_sample(sample)
  File /usr/local/lib/python3.12/dist-packages/datasets/iterable_dataset.py:2538, in IterableDataset.__iter__
  File /usr/local/lib/python3.12/dist-packages/datasets/iterable_dataset.py:1714, in BufferShuffledExamplesIterable.__iter__
  File /usr/local/lib/python3.12/dist-packages/datasets/iterable_dataset.py:513, in RebatchedArrowExamplesIterable.__iter__
  File /usr/local/lib/python3.12/dist-packages/datasets/iterable_dataset.py:437, in ShuffledDataSourcesArrowExamplesIterable.__iter__
  File /usr/local/lib/python3.12/dist-packages/datasets/packaged_modules/json/json.py:137, in Json._generate_tables
  File /usr/local/lib/python3.12/dist-packages/datasets/utils/file_utils.py:824, in read_with_retries
  File /usr/local/lib/python3.12/dist-packages/huggingface_hub/hf_file_system.py:1016, in HfFileSystemFile.read
  File /usr/local/lib/python3.12/dist-packages/fsspec/spec.py:1941, in AbstractBufferedFile.read
  File /usr/local/lib/python3.12/dist-packages/fsspec/caching.py:234, in ReadAheadCache._fetch
  File /usr/local/lib/python3.12/dist-packages/huggingface_hub/hf_file_system.py:976, in HfFileSystemFile._fetch_range
  File /usr/local/lib/python3.12/dist-packages/huggingface_hub/utils/_http.py:306, in http_backoff
  File /usr/local/lib/python3.12/dist-packages/requests/sessions.py:589, in Session.request
  File /usr/local/lib/python3.12/dist-packages/requests/sessions.py:724, in Session.send
  File /usr/local/lib/python3.12/dist-packages/requests/sessions.py:265, in SessionRedirectMixin.resolve_redirects
  File /usr/local/lib/python3.12/dist-packages/requests/sessions.py:746, in Session.send
  File /usr/local/lib/python3.12/dist-packages/requests/models.py:902, in Response.content
  File /usr/local/lib/python3.12/dist-packages/requests/models.py:820, in Response.iter_content.<locals>.generate
  File /usr/local/lib/python3.12/dist-packages/urllib3/response.py:1091, in HTTPResponse.stream
  File /usr/local/lib/python3.12/dist-packages/urllib3/response.py:980, in HTTPResponse.read
  File /usr/local/lib/python3.12/dist-packages/urllib3/response.py:904, in HTTPResponse._raw_read
  File /usr/local/lib/python3.12/dist-packages/urllib3/response.py:887, in HTTPResponse._fp_read
  File /usr/lib/python3.12/http/client.py:479, in HTTPResponse.read
  File /usr/lib/python3.12/socket.py:707, in SocketIO.readinto
  File /usr/lib/python3.12/ssl.py:1252, in SSLSocket.recv_into
  File /usr/lib/python3.12/ssl.py:1104, in SSLSocket.read
KeyboardInterrupt

output (stderr):
  0%|          | 12.9M/3.76G [00:27<2:04:33, 501kB/s]
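The interrupt killed the run roughly 13 MB into a 3.76 GB stream, and the script as written restarts a dataset from zero unless its output file has already reached the target size. A hedged sketch of one way to make the loop resumable, assuming the `state_dict()` / `load_state_dict()` stream-checkpointing API that recent `datasets` releases document for IterableDataset (the checkpoint file name is illustrative, and the state is assumed JSON-serializable; pickle is a safe fallback):

import json, os
from datasets import load_dataset

ds = load_dataset("tokyotech-llm/swallow-code", name="exp11-scor",
                  split="train", streaming=True)

STATE_FILE = "stream_state.json"  # hypothetical checkpoint path

# Restore the stream position if a previous run saved one.
if os.path.exists(STATE_FILE):
    with open(STATE_FILE) as fh:
        ds.load_state_dict(json.load(fh))

for i, sample in enumerate(ds):
    ...  # write sample text, count bytes, etc.
    if i % 10_000 == 0:  # periodically persist the position
        with open(STATE_FILE, "w") as fh:
            json.dump(ds.state_dict(), fh)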
source:
import os
import glob
from datasets import load_dataset
from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers
from tqdm import tqdm
from transformers import GPT2TokenizerFast

# ================= 1. Global configuration =================

# Total sample size: 5 GB
TOTAL_SIZE_GB = 5
OUTPUT_DIR = "./tokenizer_train_data_5gb"

# Dataset mixing configuration
DATASET_CONFIG = [
    # --- 1. Code (70% = 3.5 GB) ---
    # Use the "exp11-scor" subset of Swallow-Code
    {
        "name": "swallow_code",
        "hf_id": "tokyotech-llm/swallow-code",
        "subset": "exp11-scor",
        "ratio": 0.70,
    },

    # --- 2. Math (10% = 0.5 GB) ---
    # Swallow-Math
    {
        "name": "swallow_math",
        "hf_id": "tokyotech-llm/swallow-math",
        "subset": None,  # default configuration
        "ratio": 0.10,
    },

    # --- 3. General English (15% = 0.75 GB) ---
    # FineWeb
    {
        "name": "english_fineweb",
        "hf_id": "HuggingFaceFW/fineweb-edu",
        "subset": "sample-10BT",  # its 10B-token sample version is plenty
        "ratio": 0.15,
    },

    # --- 4. General Chinese (5% = 0.25 GB) ---
    # SkyPile (high-quality Chinese)
    {
        "name": "chinese_skypile",
        "hf_id": "Skywork/SkyPile-150B",
        "subset": None,
        "ratio": 0.05,
    }
]
| 266 |
+
"# ================= 2. 智能文本提取 =================\n",
|
| 267 |
+
"\n",
|
| 268 |
+
"def extract_text_from_sample(sample):\n",
|
| 269 |
+
" # 1. 优先查找单一文本列 (按优先级排序)\n",
|
| 270 |
+
" # 大多数数据集都在这里能找到\n",
|
| 271 |
+
" text_cols = [\"content\", \"text\", \"body\", \"code\", \"response\"] \n",
|
| 272 |
+
" for col in text_cols:\n",
|
| 273 |
+
" if col in sample and isinstance(sample[col], str) and len(sample[col]) > 0:\n",
|
| 274 |
+
" return sample[col]\n",
|
| 275 |
+
" \n",
|
| 276 |
+
" # 2. 特殊处理:数学问答对\n",
|
| 277 |
+
" if \"question\" in sample and \"answer\" in sample:\n",
|
| 278 |
+
" q = sample.get(\"question\", \"\")\n",
|
| 279 |
+
" a = sample.get(\"answer\", \"\")\n",
|
| 280 |
+
" return f\"Question:\\n{q}\\n\\nAnswer:\\n{a}\"\n",
|
| 281 |
+
" \n",
|
| 282 |
+
" # 3. 如果没找到,返回 None,让主循环跳过,而不是盲猜\n",
|
| 283 |
+
" return None\n",
|
| 284 |
+
"\n",
|
| 285 |
+
"# ================= 3. 优化后的采样逻辑 =================\n",
|
| 286 |
+
"\n",
|
| 287 |
+
"def sample_data():\n",
|
| 288 |
+
" if not os.path.exists(OUTPUT_DIR):\n",
|
| 289 |
+
" os.makedirs(OUTPUT_DIR)\n",
|
| 290 |
+
" \n",
|
| 291 |
+
" print(f\"=== 开始采样 | 目标总大小: {TOTAL_SIZE_GB} GB ===\")\n",
|
| 292 |
+
" \n",
|
| 293 |
+
" generated_files = []\n",
|
| 294 |
+
"\n",
|
| 295 |
+
" for config in DATASET_CONFIG:\n",
|
| 296 |
+
" target_bytes = int(config[\"ratio\"] * TOTAL_SIZE_GB * 1024**3)\n",
|
| 297 |
+
" output_file = os.path.join(OUTPUT_DIR, f\"{config['name']}.txt\")\n",
|
| 298 |
+
" generated_files.append(output_file)\n",
|
| 299 |
+
"\n",
|
| 300 |
+
" if os.path.exists(output_file) and os.path.getsize(output_file) >= target_bytes:\n",
|
| 301 |
+
" print(f\"[已完成] {config['name']} (跳过)\")\n",
|
| 302 |
+
" continue\n",
|
| 303 |
+
"\n",
|
| 304 |
+
" print(f\"\\n 正在下载: {config['name']}\")\n",
|
| 305 |
+
"\n",
|
| 306 |
+
" try:\n",
|
| 307 |
+
" ds = load_dataset(\n",
|
| 308 |
+
" config[\"hf_id\"], \n",
|
| 309 |
+
" name=config[\"subset\"], \n",
|
| 310 |
+
" split=\"train\", \n",
|
| 311 |
+
" streaming=True, \n",
|
| 312 |
+
" trust_remote_code=True\n",
|
| 313 |
+
" )\n",
|
| 314 |
+
" \n",
|
| 315 |
+
" # 【优化】调大 buffer_size 以获得更好的随机性 (内存允许的话)\n",
|
| 316 |
+
" ds_shuffled = ds.shuffle(buffer_size=100000, seed=42)\n",
|
| 317 |
+
" \n",
|
| 318 |
+
" current_bytes = 0\n",
|
| 319 |
+
" pbar = tqdm(total=target_bytes, unit='B', unit_scale=True)\n",
|
| 320 |
+
"\n",
|
| 321 |
+
" with open(output_file, \"w\", encoding=\"utf-8\") as f:\n",
|
| 322 |
+
" for sample in ds_shuffled:\n",
|
| 323 |
+
" try:\n",
|
| 324 |
+
" text = extract_text_from_sample(sample)\n",
|
| 325 |
+
" \n",
|
| 326 |
+
" # 【优化】严格过滤:如果是 None 或者太短,直接跳过\n",
|
| 327 |
+
" if text is None or len(text) < 20: \n",
|
| 328 |
+
" continue\n",
|
| 329 |
+
" \n",
|
| 330 |
+
" f.write(text + \"\\n<|endoftext|>\\n\")\n",
|
| 331 |
+
" \n",
|
| 332 |
+
" b_size = len(text.encode('utf-8'))\n",
|
| 333 |
+
" current_bytes += b_size\n",
|
| 334 |
+
" pbar.update(b_size)\n",
|
| 335 |
+
"\n",
|
| 336 |
+
" if current_bytes >= target_bytes:\n",
|
| 337 |
+
" break\n",
|
| 338 |
+
" except Exception:\n",
|
| 339 |
+
" continue \n",
|
| 340 |
+
" \n",
|
| 341 |
+
" pbar.close()\n",
|
| 342 |
+
"\n",
|
| 343 |
+
" except Exception as e:\n",
|
| 344 |
+
" print(f\"[错误] 处理 {config['name']} 失败: {e}\")\n",
|
| 345 |
+
" \n",
|
| 346 |
+
" return generated_files\n",
|
| 347 |
+
"\n",
|
| 348 |
+
"# ================= 4. 优化后的训练逻辑 =================\n",
|
| 349 |
+
"\n",
|
| 350 |
+
"def train_coder_tokenizer(files):\n",
|
| 351 |
+
" print(\"\\n🔨 === 开始训练 Coder Tokenizer (BPE) ===\")\n",
|
| 352 |
+
"\n",
|
| 353 |
+
" valid_files = [f for f in files if os.path.exists(f) and os.path.getsize(f) > 0]\n",
|
| 354 |
+
" if not valid_files:\n",
|
| 355 |
+
" print(\"没有有效的数据文件,终止训练。\")\n",
|
| 356 |
+
" return\n",
|
| 357 |
+
"\n",
|
| 358 |
+
" tokenizer = Tokenizer(models.BPE())\n",
|
| 359 |
+
" tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)\n",
|
| 360 |
+
" tokenizer.decoder = decoders.ByteLevel()\n",
|
| 361 |
+
"\n",
|
| 362 |
+
" special_tokens = [\n",
|
| 363 |
+
" \"<|endoftext|>\",\n",
|
| 364 |
+
" \"<pad>\",\n",
|
| 365 |
+
" \"<mask>\",\n",
|
| 366 |
+
" \"<|fim_prefix|>\",\n",
|
| 367 |
+
" \"<|fim_middle|>\",\n",
|
| 368 |
+
" \"<|fim_suffix|>\"\n",
|
| 369 |
+
" ]\n",
|
| 370 |
+
"\n",
|
| 371 |
+
" trainer = trainers.BpeTrainer(\n",
|
| 372 |
+
" vocab_size=49152,\n",
|
| 373 |
+
" min_frequency=2,\n",
|
| 374 |
+
" special_tokens=special_tokens,\n",
|
| 375 |
+
" initial_alphabet=pre_tokenizers.ByteLevel.alphabet()\n",
|
| 376 |
+
" )\n",
|
| 377 |
+
"\n",
|
| 378 |
+
" tokenizer.train(valid_files, trainer=trainer)\n",
|
| 379 |
+
"\n",
|
| 380 |
+
" save_path = \"./SEDDcoder_tokenizer\"\n",
|
| 381 |
+
" if not os.path.exists(save_path):\n",
|
| 382 |
+
" os.makedirs(save_path)\n",
|
| 383 |
+
"\n",
|
| 384 |
+
" fast_tokenizer = GPT2TokenizerFast(\n",
|
| 385 |
+
" tokenizer_object=tokenizer,\n",
|
| 386 |
+
" bos_token=\"<|endoftext|>\",\n",
|
| 387 |
+
" eos_token=\"<|endoftext|>\", \n",
|
| 388 |
+
" unk_token=\"<|endoftext|>\",\n",
|
| 389 |
+
" mask_token=\"<mask>\",\n",
|
| 390 |
+
" pad_token=\"<pad>\",\n",
|
| 391 |
+
" additional_special_tokens=[\"<|fim_prefix|>\", \"<|fim_middle|>\", \"<|fim_suffix|>\"]\n",
|
| 392 |
+
" )\n",
|
| 393 |
+
"\n",
|
| 394 |
+
" fast_tokenizer.save_pretrained(save_path)\n",
|
| 395 |
+
" print(f\"\\n模型已保存至: {save_path}\")\n",
|
| 396 |
+
" print(f\" 词表大小: {fast_tokenizer.vocab_size}\")\n",
|
| 397 |
+
"\n",
|
| 398 |
+
"if __name__ == \"__main__\":\n",
|
| 399 |
+
" files = sample_data()\n",
|
| 400 |
+
" train_coder_tokenizer(files)"
|
| 401 |
+
]
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"cell_type": "code",
|
| 405 |
+
"execution_count": 2,
|
| 406 |
+
"id": "d0bf613b-7344-4736-b142-e0b8461dfbd6",
|
| 407 |
+
"metadata": {},
|
| 408 |
+
"outputs": [
|
| 409 |
+
{
|
| 410 |
+
"name": "stdout",
|
| 411 |
+
"output_type": "stream",
|
| 412 |
+
"text": [
|
| 413 |
+
"Collecting hf_transfer\n",
|
| 414 |
+
" Downloading hf_transfer-0.1.9-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.7 kB)\n",
|
| 415 |
+
"Downloading hf_transfer-0.1.9-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.6 MB)\n",
|
| 416 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.6/3.6 MB\u001b[0m \u001b[31m29.1 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m\n",
|
| 417 |
+
"\u001b[?25hInstalling collected packages: hf_transfer\n",
|
| 418 |
+
"Successfully installed hf_transfer-0.1.9\n",
|
| 419 |
+
"Note: you may need to restart the kernel to use updated packages.\n"
|
| 420 |
+
]
|
| 421 |
+
}
|
| 422 |
+
],
|
| 423 |
+
"source": [
|
| 424 |
+
"pip install hf_transfer"
|
| 425 |
+
]
|
| 426 |
+
},
|
| 427 |
+
{
|
| 428 |
+
"cell_type": "code",
|
| 429 |
+
"execution_count": 7,
|
| 430 |
+
"id": "59fb07a4-1f0e-4ea8-8e71-90226bc33de2",
|
| 431 |
+
"metadata": {},
|
| 432 |
+
"outputs": [
|
| 433 |
+
{
|
| 434 |
+
"name": "stderr",
|
| 435 |
+
"output_type": "stream",
|
| 436 |
+
"text": [
|
| 437 |
+
"<>:10: SyntaxWarning: invalid escape sequence '\\s'\n",
|
| 438 |
+
"<>:10: SyntaxWarning: invalid escape sequence '\\s'\n",
|
| 439 |
+
"/tmp/ipykernel_3132/3547788621.py:10: SyntaxWarning: invalid escape sequence '\\s'\n",
|
| 440 |
+
" TEST_TEXT = \"\"\"def calculate_loss(y_true, y_pred):\n"
|
| 441 |
+
]
|
| 442 |
+
},
|
| 443 |
+
{
|
| 444 |
+
"name": "stdout",
|
| 445 |
+
"output_type": "stream",
|
| 446 |
+
"text": [
|
| 447 |
+
"正在加载分词器...\n",
|
| 448 |
+
"✅ 加载成功!词表大小: 49152\n",
|
| 449 |
+
"\n",
|
| 450 |
+
"==================== 1. 肉眼观察切分效果 ====================\n",
|
| 451 |
+
"前 5000 个 Token 展示 (注意 Ġ 代表空格):\n",
|
| 452 |
+
"[ 0] def\n",
|
| 453 |
+
"[ 1] Ġcalculate\n",
|
| 454 |
+
"[ 2] _\n",
|
| 455 |
+
"[ 3] loss\n",
|
| 456 |
+
"[ 4] (\n",
|
| 457 |
+
"[ 5] y\n",
|
| 458 |
+
"[ 6] _\n",
|
| 459 |
+
"[ 7] true\n",
|
| 460 |
+
"[ 8] ,\n",
|
| 461 |
+
"[ 9] Ġy\n",
|
| 462 |
+
"[10] _\n",
|
| 463 |
+
"[11] pred\n",
|
| 464 |
+
"[12] ):\n",
|
| 465 |
+
"[13] Ċ\n",
|
| 466 |
+
"[14] ĠĠĠ\n",
|
| 467 |
+
"[15] Ġ#\n",
|
| 468 |
+
"[16] ĠThis\n",
|
| 469 |
+
"[17] Ġis\n",
|
| 470 |
+
"[18] Ġa\n",
|
| 471 |
+
"[19] Ġcomment\n",
|
| 472 |
+
"[20] Ċ\n",
|
| 473 |
+
"[21] ĠĠĠ\n",
|
| 474 |
+
"[22] Ġ#\n",
|
| 475 |
+
"[23] Ġ\n",
|
| 476 |
+
"[24] 计ç®Ĺ\n",
|
| 477 |
+
"[25] åĿĩ\n",
|
| 478 |
+
"[26] æĸ¹\n",
|
| 479 |
+
"[27] 误差\n",
|
| 480 |
+
"[28] Ċ\n",
|
| 481 |
+
"[29] ĠĠĠ\n",
|
| 482 |
+
"[30] Ġerror\n",
|
| 483 |
+
"[31] Ġ=\n",
|
| 484 |
+
"[32] Ġy\n",
|
| 485 |
+
"[33] _\n",
|
| 486 |
+
"[34] true\n",
|
| 487 |
+
"[35] Ġ-\n",
|
| 488 |
+
"[36] Ġy\n",
|
| 489 |
+
"[37] _\n",
|
| 490 |
+
"[38] pred\n",
|
| 491 |
+
"[39] Ċ\n",
|
| 492 |
+
"[40] ĠĠĠ\n",
|
| 493 |
+
"[41] Ġmse\n",
|
| 494 |
+
"[42] Ġ=\n",
|
| 495 |
+
"[43] Ġnp\n",
|
| 496 |
+
"[44] .\n",
|
| 497 |
+
"[45] mean\n",
|
| 498 |
+
"[46] (\n",
|
| 499 |
+
"[47] error\n",
|
| 500 |
+
"[48] Ġ**\n",
|
| 501 |
+
"[49] Ġ2\n",
|
| 502 |
+
"[50] )\n",
|
| 503 |
+
"[51] Ċ\n",
|
| 504 |
+
"[52] ĠĠĠ\n",
|
| 505 |
+
"[53] Ġreturn\n",
|
| 506 |
+
"[54] Ġmse\n",
|
| 507 |
+
"[55] Ċ\n",
|
| 508 |
+
"[56] Ċ\n",
|
| 509 |
+
"[57] #\n",
|
| 510 |
+
"[58] ĠMath\n",
|
| 511 |
+
"[59] Ġformula\n",
|
| 512 |
+
"[60] :\n",
|
| 513 |
+
"[61] Ċ\n",
|
| 514 |
+
"[62] #\n",
|
| 515 |
+
"[63] Ġf\n",
|
| 516 |
+
"[64] (\n",
|
| 517 |
+
"[65] x\n",
|
| 518 |
+
"[66] )\n",
|
| 519 |
+
"[67] Ġ=\n",
|
| 520 |
+
"[68] Ġ\n",
|
| 521 |
+
"[69] Č\n",
|
| 522 |
+
"[70] rac\n",
|
| 523 |
+
"[71] {\n",
|
| 524 |
+
"[72] 1\n",
|
| 525 |
+
"[73] }{\\\n",
|
| 526 |
+
"[74] sqrt\n",
|
| 527 |
+
"[75] {\n",
|
| 528 |
+
"[76] 2\n",
|
| 529 |
+
"[77] \\\n",
|
| 530 |
+
"[78] pi\n",
|
| 531 |
+
"[79] \\\n",
|
| 532 |
+
"[80] sigma\n",
|
| 533 |
+
"[81] ^\n",
|
| 534 |
+
"[82] 2\n",
|
| 535 |
+
"[83] }}\n",
|
| 536 |
+
"[84] Ġe\n",
|
| 537 |
+
"[85] ^{-\n",
|
| 538 |
+
"[86] Č\n",
|
| 539 |
+
"[87] rac\n",
|
| 540 |
+
"[88] {(\n",
|
| 541 |
+
"[89] x\n",
|
| 542 |
+
"[90] -\\\n",
|
| 543 |
+
"[91] mu\n",
|
| 544 |
+
"[92] )^\n",
|
| 545 |
+
"[93] 2\n",
|
| 546 |
+
"[94] }{\n",
|
| 547 |
+
"[95] 2\n",
|
| 548 |
+
"[96] \\\n",
|
| 549 |
+
"[97] sigma\n",
|
| 550 |
+
"[98] ^\n",
|
| 551 |
+
"[99] 2\n",
|
| 552 |
+
"[100] }}\n",
|
| 553 |
+
"[101] Ċ\n",
|
| 554 |
+
"\n",
|
| 555 |
+
"==================== 2. 无损还原测试 ====================\n",
|
| 556 |
+
"✅ 完美还原!输入与输出完全一致。\n",
|
| 557 |
+
"\n",
|
| 558 |
+
"==================== 3. 关键能力核查 ====================\n",
|
| 559 |
+
"测试 4个空格缩进: ['ĠĠĠĠ']\n",
|
| 560 |
+
" -> ⭐ 优秀!4个空格被识别为 1 个 token。\n",
|
| 561 |
+
"\n",
|
| 562 |
+
"测试关键字完整性:\n",
|
| 563 |
+
" ' def' -> ['Ġdef']\n",
|
| 564 |
+
" ' return' -> ['Ġreturn']\n",
|
| 565 |
+
" ' import' -> ['Ġimport']\n",
|
| 566 |
+
" ' class' -> ['Ġclass']\n",
|
| 567 |
+
" ' numpy' -> ['Ġnumpy']\n",
|
| 568 |
+
"\n",
|
| 569 |
+
"测试中文: '计算均方误差' -> ['计ç®Ĺ', 'åĿĩ', 'æĸ¹', '误差']\n",
|
| 570 |
+
" -> ⭐ 还可以,常用中文词汇被合并了。\n",
|
| 571 |
+
"\n",
|
| 572 |
+
"==================== 4. 压缩率 PK (vs GPT-2 标准) ====================\n",
|
| 573 |
+
"同一段文本 Token 数量:\n",
|
| 574 |
+
" GPT-2 原版: 131\n",
|
| 575 |
+
" 你的分词器: 102\n",
|
| 576 |
+
"\n",
|
| 577 |
+
"🚀 结论: 你的分词器比 GPT-2 节省了 22.14% 的长度!\n",
|
| 578 |
+
" (对于代码模型,这意味着能读更长的上下文,训练速度更快)\n"
|
| 579 |
+
]
|
| 580 |
+
}
|
| 581 |
+
],
|
| 582 |
+
"source": [
|
| 583 |
+
"from transformers import AutoTokenizer\n",
|
| 584 |
+
"import json\n",
|
| 585 |
+
"\n",
|
| 586 |
+
"# ================= 配置 =================\n",
|
| 587 |
+
"# 指向你刚才保存的文件夹路径\n",
|
| 588 |
+
"TOKENIZER_PATH = \"./SEDDcoder_tokenizer\"\n",
|
| 589 |
+
"\n",
|
| 590 |
+
"# 准备测试文本:包含 Python 代码、数学公式、中文\n",
|
| 591 |
+
"# 这正好对应你训练的三个主要领域\n",
|
| 592 |
+
"TEST_TEXT = \"\"\"def calculate_loss(y_true, y_pred):\n",
|
| 593 |
+
" # This is a comment\n",
|
| 594 |
+
" # 计算均方误差\n",
|
| 595 |
+
" error = y_true - y_pred\n",
|
| 596 |
+
" mse = np.mean(error ** 2)\n",
|
| 597 |
+
" return mse\n",
|
| 598 |
+
"\n",
|
| 599 |
+
"# Math formula:\n",
|
| 600 |
+
"# f(x) = \\frac{1}{\\sqrt{2\\pi\\sigma^2}} e^{-\\frac{(x-\\mu)^2}{2\\sigma^2}}\n",
|
| 601 |
+
"\"\"\"\n",
|
| 602 |
+
"\n",
|
| 603 |
+
"# ================= 开始测试 =================\n",
|
| 604 |
+
"\n",
|
| 605 |
+
"def print_separator(title):\n",
|
| 606 |
+
" print(f\"\\n{'='*20} {title} {'='*20}\")\n",
|
| 607 |
+
"\n",
|
| 608 |
+
"try:\n",
|
| 609 |
+
" # 1. 加载分词器\n",
|
| 610 |
+
" print(\"正在加载分词器...\")\n",
|
| 611 |
+
" tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH)\n",
|
| 612 |
+
" print(f\"✅ 加载成功!词表大小: {tokenizer.vocab_size}\")\n",
|
| 613 |
+
"\n",
|
| 614 |
+
" # 2. 基础编码测试 (Eye Test)\n",
|
| 615 |
+
" print_separator(\"1. 肉眼观察切分效果\")\n",
|
| 616 |
+
" tokens = tokenizer.tokenize(TEST_TEXT)\n",
|
| 617 |
+
" \n",
|
| 618 |
+
" # 打印前 30 个 token 看看\n",
|
| 619 |
+
" print(\"前 5000 个 Token 展示 (注意 Ġ 代表空格):\")\n",
|
| 620 |
+
" for i, t in enumerate(tokens[:5000]):\n",
|
| 621 |
+
" print(f\"[{i:2d}] {t}\")\n",
|
| 622 |
+
"\n",
|
| 623 |
+
" # 3. 还原测试 (Round-Trip)\n",
|
| 624 |
+
" print_separator(\"2. 无损还原测试\")\n",
|
| 625 |
+
" token_ids = tokenizer.encode(TEST_TEXT)\n",
|
| 626 |
+
" decoded_text = tokenizer.decode(token_ids)\n",
|
| 627 |
+
" \n",
|
| 628 |
+
" if decoded_text == TEST_TEXT:\n",
|
| 629 |
+
" print(\"✅ 完美还原!输入与输出完全一致。\")\n",
|
| 630 |
+
" else:\n",
|
| 631 |
+
" print(\"❌ 还原失败!分词器可能丢失了信息。\")\n",
|
| 632 |
+
" # 调试用:\n",
|
| 633 |
+
" # print(\"原文:\", repr(TEST_TEXT))\n",
|
| 634 |
+
" # print(\"还原:\", repr(decoded_text))\n",
|
| 635 |
+
"\n",
|
| 636 |
+
" # 4. 关键能力检查\n",
|
| 637 |
+
" print_separator(\"3. 关键能力核查\")\n",
|
| 638 |
+
" \n",
|
| 639 |
+
" # Check 1: 缩进是否被压缩?\n",
|
| 640 |
+
" # 在 Python 中,4个空格非常常见。好的 Coder Tokenizer 会把它变成 1 个 token\n",
|
| 641 |
+
" indent_text = \" \" # 4个空格\n",
|
| 642 |
+
" indent_tokens = tokenizer.tokenize(indent_text)\n",
|
| 643 |
+
" print(f\"测试 4个空格缩进: {indent_tokens}\")\n",
|
| 644 |
+
" if len(indent_tokens) == 1:\n",
|
| 645 |
+
" print(\" -> ⭐ 优秀!4个空格被识别为 1 个 token。\")\n",
|
| 646 |
+
" else:\n",
|
| 647 |
+
" print(f\" -> 普通。4个空格被切成了 {len(indent_tokens)} 个 token。\")\n",
|
| 648 |
+
"\n",
|
| 649 |
+
" # Check 2: 常用关键字是否完整?\n",
|
| 650 |
+
" keywords = [\"def\", \"return\", \"import\", \"class\", \"numpy\"]\n",
|
| 651 |
+
" print(\"\\n测试关键字完整性:\")\n",
|
| 652 |
+
" for kw in keywords:\n",
|
| 653 |
+
" # 注意:要在前面加个空格,模拟代码中的 \" def\" 场景\n",
|
| 654 |
+
" t = tokenizer.tokenize(\" \" + kw)\n",
|
| 655 |
+
" print(f\" ' {kw}' -> {t}\")\n",
|
| 656 |
+
"\n",
|
| 657 |
+
" # Check 3: 中文是否乱码?\n",
|
| 658 |
+
" chinese_sample = \"计算均方误差\"\n",
|
| 659 |
+
" cn_tokens = tokenizer.tokenize(chinese_sample)\n",
|
| 660 |
+
" print(f\"\\n测试中文: '{chinese_sample}' -> {cn_tokens}\")\n",
|
| 661 |
+
" if len(cn_tokens) <= 4:\n",
|
| 662 |
+
" print(\" -> ⭐ 还可以,常用中文词汇被合并了。\")\n",
|
| 663 |
+
" else:\n",
|
| 664 |
+
" print(\" -> 略碎,可能是因为中文语料占比只有 5%。\")\n",
|
| 665 |
+
"\n",
|
| 666 |
+
" # 5. 简单压缩率对比 (vs GPT-2)\n",
|
| 667 |
+
" print_separator(\"4. 压缩率 PK (vs GPT-2 标准)\")\n",
|
| 668 |
+
" from transformers import GPT2Tokenizer\n",
|
| 669 |
+
" try:\n",
|
| 670 |
+
" gpt2_tok = GPT2Tokenizer.from_pretrained(\"gpt2\")\n",
|
| 671 |
+
" len_gpt2 = len(gpt2_tok.encode(TEST_TEXT))\n",
|
| 672 |
+
" len_yours = len(token_ids)\n",
|
| 673 |
+
" \n",
|
| 674 |
+
" print(f\"同一段文本 Token 数量:\")\n",
|
| 675 |
+
" print(f\" GPT-2 原版: {len_gpt2}\")\n",
|
| 676 |
+
" print(f\" 你的分词器: {len_yours}\")\n",
|
| 677 |
+
" \n",
|
| 678 |
+
" improvement = (len_gpt2 - len_yours) / len_gpt2 * 100\n",
|
| 679 |
+
" print(f\"\\n🚀 结论: 你的分词器比 GPT-2 节省了 {improvement:.2f}% 的长度!\")\n",
|
| 680 |
+
" if improvement > 10:\n",
|
| 681 |
+
" print(\" (对于代码模型,这意味着能读更长的上下文,训练速度更快)\")\n",
|
| 682 |
+
" \n",
|
| 683 |
+
" except Exception as e:\n",
|
| 684 |
+
" print(\"跳过对比 (未安装或无法连接 GPT-2):\", e)\n",
|
| 685 |
+
"\n",
|
| 686 |
+
"except Exception as e:\n",
|
| 687 |
+
" print(f\"\\n❌ 发生错误: {e}\")\n",
|
| 688 |
+
" print(\"请检查路径是否正确,或是否安装了 tokenizers 库。\")"
|
| 689 |
+
]
|
| 690 |
+
},
|
| 691 |
+
{
|
| 692 |
+
"cell_type": "code",
|
| 693 |
+
"execution_count": 8,
|
| 694 |
+
"id": "ac1a0ffb-f6fa-4c65-9589-fc06b1e09e0c",
|
| 695 |
+
"metadata": {},
|
| 696 |
+
"outputs": [
|
| 697 |
+
{
|
| 698 |
+
"name": "stdout",
|
| 699 |
+
"output_type": "stream",
|
| 700 |
+
"text": [
|
| 701 |
+
"正在加载 GPT-2 官方分词器...\n",
|
| 702 |
+
"\n",
|
| 703 |
+
"=== GPT-2 (官方) 的切分结果 ===\n",
|
| 704 |
+
"['def', 'Ġcalculate', '_', 'loss', '(', 'y', '_', 'true', ',', 'Ġy', '_', 'pred', '):', 'Ċ', 'Ġ', 'Ġ', 'Ġ', 'Ġ#', 'Ġè', '®', '¡', 'ç', '®', 'Ĺ', 'å', 'Ŀ', 'ĩ', 'æĸ¹', 'è¯', '¯', 'å·', '®', 'Ċ', 'Ġ', 'Ġ', 'Ġ', 'Ġreturn', 'Ġm', 'se']\n",
|
| 705 |
+
"Token 数量: 39\n",
|
| 706 |
+
"\n",
|
| 707 |
+
"=== 你的 Coder 分词器 的切分结果 ===\n",
|
| 708 |
+
"['def', 'Ġcalculate', '_', 'loss', '(', 'y', '_', 'true', ',', 'Ġy', '_', 'pred', '):', 'Ċ', 'ĠĠĠ', 'Ġ#', 'Ġ', '计ç®Ĺ', 'åĿĩ', 'æĸ¹', '误差', 'Ċ', 'ĠĠĠ', 'Ġreturn', 'Ġmse']\n",
|
| 709 |
+
"Token 数量: 25\n"
|
| 710 |
+
]
|
| 711 |
+
}
|
| 712 |
+
],
|
| 713 |
+
"source": [
|
| 714 |
+
"from transformers import AutoTokenizer\n",
|
| 715 |
+
"\n",
|
| 716 |
+
"# 1. 加载官方 GPT-2 分词器\n",
|
| 717 |
+
"print(\"正在加载 GPT-2 官方分词器...\")\n",
|
| 718 |
+
"gpt2_tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n",
|
| 719 |
+
"\n",
|
| 720 |
+
"# 2. 同样的测试文本\n",
|
| 721 |
+
"text = \"\"\"def calculate_loss(y_true, y_pred):\n",
|
| 722 |
+
" # 计算均方误差\n",
|
| 723 |
+
" return mse\"\"\"\n",
|
| 724 |
+
"\n",
|
| 725 |
+
"# 3. 打印 GPT-2 的切分结果\n",
|
| 726 |
+
"print(\"\\n=== GPT-2 (官方) 的切分结果 ===\")\n",
|
| 727 |
+
"gpt2_tokens = gpt2_tokenizer.tokenize(text)\n",
|
| 728 |
+
"print(gpt2_tokens)\n",
|
| 729 |
+
"print(f\"Token 数量: {len(gpt2_tokens)}\")\n",
|
| 730 |
+
"\n",
|
| 731 |
+
"# -------------------------------------------\n",
|
| 732 |
+
"# 如果你想对比你自己的 (假设路径在 ./SEDDcoder_tokenizer)\n",
|
| 733 |
+
"# -------------------------------------------\n",
|
| 734 |
+
"try:\n",
|
| 735 |
+
" print(\"\\n=== 你的 Coder 分词器 的切分结果 ===\")\n",
|
| 736 |
+
" my_tokenizer = AutoTokenizer.from_pretrained(\"./SEDDcoder_tokenizer\")\n",
|
| 737 |
+
" my_tokens = my_tokenizer.tokenize(text)\n",
|
| 738 |
+
" print(my_tokens)\n",
|
| 739 |
+
" print(f\"Token 数量: {len(my_tokens)}\")\n",
|
| 740 |
+
"except:\n",
|
| 741 |
+
" pass"
|
| 742 |
+
]
|
| 743 |
+
},
|
| 744 |
+
{
|
| 745 |
+
"cell_type": "code",
|
| 746 |
+
"execution_count": 11,
|
| 747 |
+
"id": "679befca-f975-499d-92f7-c3b5ea8d58b7",
|
| 748 |
+
"metadata": {},
|
| 749 |
+
"outputs": [
|
| 750 |
+
{
|
| 751 |
+
"name": "stdout",
|
| 752 |
+
"output_type": "stream",
|
| 753 |
+
"text": [
|
| 754 |
+
"【原始文本】: kwargs\n",
|
| 755 |
+
"\n",
|
| 756 |
+
"--- A. 查看切分并手动还原 ---\n",
|
| 757 |
+
"【切分结果】: ['kwargs']\n",
|
| 758 |
+
"【还原结果】: kwargs\n",
|
| 759 |
+
"\n",
|
| 760 |
+
"--- B. 模拟模型输入输出 (ID 层面) ---\n",
|
| 761 |
+
"【Token IDs】: [2251]\n",
|
| 762 |
+
"【解码结果】: kwargs\n",
|
| 763 |
+
"\n",
|
| 764 |
+
"--- 验证 ---\n",
|
| 765 |
+
"✅ 成功!能够完美还原(无损)。\n"
|
| 766 |
+
]
|
| 767 |
+
}
|
| 768 |
+
],
|
| 769 |
+
"source": [
|
| 770 |
+
"from transformers import AutoTokenizer\n",
|
| 771 |
+
"\n",
|
| 772 |
+
"# 1. 加载你训练好的分词器\n",
|
| 773 |
+
"tokenizer = AutoTokenizer.from_pretrained(\"./SEDDcoder_tokenizer\")\n",
|
| 774 |
+
"\n",
|
| 775 |
+
"# 2. 准备一段测试文本 (包含代码、中文、英文)\n",
|
| 776 |
+
"text = \"kwargs\"\n",
|
| 777 |
+
"print(f\"【原始文本】: {text}\")\n",
|
| 778 |
+
"\n",
|
| 779 |
+
"# ================= 方法 A: 仅在字符串层面操作 =================\n",
|
| 780 |
+
"print(\"\\n--- A. 查看切分并手动还原 ---\")\n",
|
| 781 |
+
"\n",
|
| 782 |
+
"# 第一步:切分 (Text -> Tokens)\n",
|
| 783 |
+
"# 这一步让你看到它是怎么“下刀”的\n",
|
| 784 |
+
"tokens = tokenizer.tokenize(text)\n",
|
| 785 |
+
"print(f\"【切分结果】: {tokens}\")\n",
|
| 786 |
+
"# 你会看到类似: ['def', 'Ġcalculate', '_', 'loss', '(', '计ç®', ... ]\n",
|
| 787 |
+
"\n",
|
| 788 |
+
"# 第二步:还原 (Tokens -> Text)\n",
|
| 789 |
+
"# 这一步把 list 里的怪符号合并回正常字符串\n",
|
| 790 |
+
"restored_text_from_tokens = tokenizer.convert_tokens_to_string(tokens)\n",
|
| 791 |
+
"print(f\"【还原结果】: {restored_text_from_tokens}\")\n",
|
| 792 |
+
"\n",
|
| 793 |
+
"\n",
|
| 794 |
+
"# ================= 方法 B: 模拟模型真实工作流程 (推荐) =================\n",
|
| 795 |
+
"print(\"\\n--- B. 模拟模型输入输出 (ID 层面) ---\")\n",
|
| 796 |
+
"\n",
|
| 797 |
+
"# 第一步:编码 (Text -> IDs)\n",
|
| 798 |
+
"# 模型看不懂字符串,只看懂数字。这是真正喂给 GPT/Llama 的东西\n",
|
| 799 |
+
"input_ids = tokenizer.encode(text)\n",
|
| 800 |
+
"print(f\"【Token IDs】: {input_ids}\")\n",
|
| 801 |
+
"\n",
|
| 802 |
+
"# 第二步:解码 (IDs -> Text)\n",
|
| 803 |
+
"# 模型输出数字后,我们用这个方法变回人类语言\n",
|
| 804 |
+
"decoded_text = tokenizer.decode(input_ids)\n",
|
| 805 |
+
"print(f\"【解码结果】: {decoded_text}\")\n",
|
| 806 |
+
"\n",
|
| 807 |
+
"# ================= 验证 =================\n",
|
| 808 |
+
"print(\"\\n--- 验证 ---\")\n",
|
| 809 |
+
"if text == decoded_text:\n",
|
| 810 |
+
" print(\"✅ 成功!能够完美还原(无损)。\")\n",
|
| 811 |
+
"else:\n",
|
| 812 |
+
" print(\"❌ 失败!还原后与原文不一致。\")"
|
| 813 |
+
]
|
| 814 |
+
},
|
| 815 |
+
{
|
| 816 |
+
"cell_type": "code",
|
| 817 |
+
"execution_count": 23,
|
| 818 |
+
"id": "ffa8642a-c6e0-47fd-b92a-285eb3e9b9f0",
|
| 819 |
+
"metadata": {},
|
| 820 |
+
"outputs": [
|
| 821 |
+
{
|
| 822 |
+
"name": "stdout",
|
| 823 |
+
"output_type": "stream",
|
| 824 |
+
"text": [
|
| 825 |
+
"正在加载分词器...\n",
|
| 826 |
+
"1. ./SEDDcoder_tokenizer\n",
|
| 827 |
+
"2. gpt2\n",
|
| 828 |
+
"✅ 加载完成!\n",
|
| 829 |
+
"\n",
|
| 830 |
+
"\n",
|
| 831 |
+
"📊 === 总体性能排行榜 (Token 越少越好) ===\n",
|
| 832 |
+
"+---------------------------------+------------+------------+---------------+\n",
|
| 833 |
+
"| 场景 | 你的长度 | GPT2长度 | 🚀 节省空间 |\n",
|
| 834 |
+
"+=================================+============+============+===============+\n",
|
| 835 |
+
"| 1. 真实Python函数 (含Docstring) | 159 | 253 | 37.2% |\n",
|
| 836 |
+
"+---------------------------------+------------+------------+---------------+\n",
|
| 837 |
+
"\n",
|
| 838 |
+
"\n",
|
| 839 |
+
"🔍 === 详细切分对比 ===\n",
|
| 840 |
+
"\n",
|
| 841 |
+
"============================================================\n",
|
| 842 |
+
"【场景 1】: 1. 真实Python函数 (含Docstring)\n",
|
| 843 |
+
"------------------------------------------------------------\n",
|
| 844 |
+
"[GPT-2] (253 tokens):\n",
|
| 845 |
+
"['def', 'Ġget', '_', 'n', 'ested', '_', 'attribute', '(', 'obj', ':', 'Ġobject', ',', 'Ġdot', '_', 'expr', ':', 'Ġstr', ')', 'Ġ->', 'Ġobject', ':', 'Ċ', 'Ġ', 'Ġ', 'Ġ', 'Ġ\"\"\"', 'Ċ', 'Ġ', 'Ġ', 'Ġ', 'ĠRet', 'rie', 'ves', 'Ġa', 'Ġnested', 'Ġattribute', 'Ġfrom', 'Ġan', 'Ġobject', 'Ġusing', 'Ġa', 'Ġdot', 'Ġnotation', 'Ġstring', '.', 'ĊĊ', 'Ġ', 'Ġ', 'Ġ', 'ĠAr'] ... (只显示前50个)\n",
|
| 846 |
+
"\n",
|
| 847 |
+
"[Yours] (159 tokens):\n",
|
| 848 |
+
"['def', 'Ġget', '_', 'nested', '_', 'attribute', '(', 'obj', ':', 'Ġobject', ',', 'Ġdot', '_', 'expr', ':', 'Ġstr', ')', 'Ġ->', 'Ġobject', ':', 'Ċ', 'ĠĠĠ', 'Ġ\"\"\"', 'Ċ', 'ĠĠĠ', 'ĠRetrieves', 'Ġa', 'Ġnested', 'Ġattribute', 'Ġfrom', 'Ġan', 'Ġobject', 'Ġusing', 'Ġa', 'Ġdot', 'Ġnotation', 'Ġstring', '.', 'Ċ', 'Ċ', 'ĠĠĠ', 'ĠArgs', ':', 'Ċ', 'ĠĠĠĠĠĠĠ', 'Ġobj', 'Ġ(', 'object', '):', 'ĠThe'] ... (只显示前50个)\n",
|
| 849 |
+
"------------------------------------------------------------\n",
|
| 850 |
+
">>> 结论: 你比 GPT-2 少用了 94 个 Token (节省 37.2%)\n",
|
| 851 |
+
"\n"
|
| 852 |
+
]
|
| 853 |
+
}
|
| 854 |
+
],
|
| 855 |
+
"source": [
|
| 856 |
+
"import pandas as pd\n",
|
| 857 |
+
"from transformers import AutoTokenizer\n",
|
| 858 |
+
"\n",
|
| 859 |
+
"# ================= 1. 配置路径 =================\n",
|
| 860 |
+
"MY_TOKENIZER_PATH = \"./SEDDcoder_tokenizer\"\n",
|
| 861 |
+
"COMPARE_MODEL = \"gpt2\"\n",
|
| 862 |
+
"\n",
|
| 863 |
+
"print(f\"正在加载分词器...\\n1. {MY_TOKENIZER_PATH}\\n2. {COMPARE_MODEL}\")\n",
|
| 864 |
+
"try:\n",
|
| 865 |
+
" my_tok = AutoTokenizer.from_pretrained(MY_TOKENIZER_PATH)\n",
|
| 866 |
+
" gpt2_tok = AutoTokenizer.from_pretrained(COMPARE_MODEL)\n",
|
| 867 |
+
" print(\"✅ 加载完成!\\n\")\n",
|
| 868 |
+
"except Exception as e:\n",
|
| 869 |
+
" print(f\"❌ 加载失败: {e}\")\n",
|
| 870 |
+
" exit()\n",
|
| 871 |
+
"\n",
|
| 872 |
+
"# ================= 2. 定义测试用例 (已修复引号和缩进) =================\n",
|
| 873 |
+
"test_cases = [\n",
|
| 874 |
+
" {\n",
|
| 875 |
+
" \"场景\": \"1. 真实Python函数 (含Docstring)\",\n",
|
| 876 |
+
" # 注意:这里使用了三个单引号 ''' 来包裹整个字符串\n",
|
| 877 |
+
" # 这样内部的 \"\"\" 和换行符就不会报错了\n",
|
| 878 |
+
" \"文本\": '''def get_nested_attribute(obj: object, dot_expr: str) -> object:\n",
|
| 879 |
+
" \"\"\"\n",
|
| 880 |
+
" Retrieves a nested attribute from an object using a dot notation string.\n",
|
| 881 |
+
"\n",
|
| 882 |
+
" Args:\n",
|
| 883 |
+
" obj (object): The object to retrieve the attribute from.\n",
|
| 884 |
+
" dot_expr (str): The dot notation string representing the attribute path.\n",
|
| 885 |
+
"\n",
|
| 886 |
+
" Returns:\n",
|
| 887 |
+
" object: The retrieved attribute value.\n",
|
| 888 |
+
" \"\"\"\n",
|
| 889 |
+
" for component in dot_expr.split(\".\"):\n",
|
| 890 |
+
" component = component.strip()\n",
|
| 891 |
+
" if not component:\n",
|
| 892 |
+
" continue\n",
|
| 893 |
+
" try:\n",
|
| 894 |
+
" obj = getattr(obj, component)\n",
|
| 895 |
+
" except AttributeError:\n",
|
| 896 |
+
" raise ValueError(f\"Attribute '{component}' does not exist\")\n",
|
| 897 |
+
" return obj'''\n",
|
| 898 |
+
" },\n",
|
| 899 |
+
"]\n",
|
| 900 |
+
"\n",
|
| 901 |
+
"# ================= 3. 执行对比逻辑 =================\n",
|
| 902 |
+
"results = []\n",
|
| 903 |
+
"detailed_logs = []\n",
|
| 904 |
+
"\n",
|
| 905 |
+
"for idx, case in enumerate(test_cases):\n",
|
| 906 |
+
" text = case[\"文本\"]\n",
|
| 907 |
+
" scenario = case[\"场景\"]\n",
|
| 908 |
+
" \n",
|
| 909 |
+
" # 1. 切分\n",
|
| 910 |
+
" tokens_mine = my_tok.tokenize(text)\n",
|
| 911 |
+
" tokens_gpt2 = gpt2_tok.tokenize(text)\n",
|
| 912 |
+
" \n",
|
| 913 |
+
" # 2. 计数\n",
|
| 914 |
+
" count_mine = len(tokens_mine)\n",
|
| 915 |
+
" count_gpt2 = len(tokens_gpt2)\n",
|
| 916 |
+
" \n",
|
| 917 |
+
" # 3. 计算节省率\n",
|
| 918 |
+
" if count_gpt2 > 0:\n",
|
| 919 |
+
" saving = (count_gpt2 - count_mine) / count_gpt2 * 100\n",
|
| 920 |
+
" else:\n",
|
| 921 |
+
" saving = 0.0\n",
|
| 922 |
+
" \n",
|
| 923 |
+
" # 存入摘要表\n",
|
| 924 |
+
" results.append({\n",
|
| 925 |
+
" \"场景\": scenario,\n",
|
| 926 |
+
" \"你的长度\": count_mine,\n",
|
| 927 |
+
" \"GPT2长度\": count_gpt2,\n",
|
| 928 |
+
" \"🚀 节省空间\": f\"{saving:.1f}%\"\n",
|
| 929 |
+
" })\n",
|
| 930 |
+
" \n",
|
| 931 |
+
" # 存入详细对比日志\n",
|
| 932 |
+
" detailed_logs.append(f\"\"\"\n",
|
| 933 |
+
"{'='*60}\n",
|
| 934 |
+
"【场景 {idx+1}】: {scenario}\n",
|
| 935 |
+
"------------------------------------------------------------\n",
|
| 936 |
+
"[GPT-2] ({count_gpt2} tokens):\n",
|
| 937 |
+
"{tokens_gpt2[:50]} ... (只显示前50个)\n",
|
| 938 |
+
"\n",
|
| 939 |
+
"[Yours] ({count_mine} tokens):\n",
|
| 940 |
+
"{tokens_mine[:50]} ... (只显示前50个)\n",
|
| 941 |
+
"------------------------------------------------------------\n",
|
| 942 |
+
">>> 结论: 你比 GPT-2 少用了 {count_gpt2 - count_mine} 个 Token (节省 {saving:.1f}%)\n",
|
| 943 |
+
"\"\"\")\n",
|
| 944 |
+
"\n",
|
| 945 |
+
"# ================= 4. 输出结果 =================\n",
|
| 946 |
+
"df = pd.DataFrame(results)\n",
|
| 947 |
+
"print(\"\\n📊 === 总体性能排行榜 (Token 越少越好) ===\")\n",
|
| 948 |
+
"# 尝试使用 tabulate 打印美观表格,如果没有安装则默认打印\n",
|
| 949 |
+
"try:\n",
|
| 950 |
+
" print(df.to_markdown(index=False, tablefmt=\"grid\"))\n",
|
| 951 |
+
"except:\n",
|
| 952 |
+
" print(df)\n",
|
| 953 |
+
"\n",
|
| 954 |
+
"print(\"\\n\\n🔍 === 详细切分对比 ===\")\n",
|
| 955 |
+
"for log in detailed_logs:\n",
|
| 956 |
+
" print(log)"
|
| 957 |
+
]
|
| 958 |
+
},
|
| 959 |
+
{
|
| 960 |
+
"cell_type": "code",
|
| 961 |
+
"execution_count": 16,
|
| 962 |
+
"id": "33319993-5a0b-4506-ba05-f0530bde8063",
|
| 963 |
+
"metadata": {},
|
| 964 |
+
"outputs": [
|
| 965 |
+
{
|
| 966 |
+
"name": "stderr",
|
| 967 |
+
"output_type": "stream",
|
| 968 |
+
"text": [
|
| 969 |
+
"huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
|
| 970 |
+
"To disable this warning, you can either:\n",
|
| 971 |
+
"\t- Avoid using `tokenizers` before the fork if possible\n",
|
| 972 |
+
"\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
|
| 973 |
+
]
|
| 974 |
+
},
|
| 975 |
+
{
|
| 976 |
+
"name": "stdout",
|
| 977 |
+
"output_type": "stream",
|
| 978 |
+
"text": [
|
| 979 |
+
"Collecting tabulate\n",
|
| 980 |
+
" Downloading tabulate-0.9.0-py3-none-any.whl.metadata (34 kB)\n",
|
| 981 |
+
"Downloading tabulate-0.9.0-py3-none-any.whl (35 kB)\n",
|
| 982 |
+
"Installing collected packages: tabulate\n",
|
| 983 |
+
"Successfully installed tabulate-0.9.0\n",
|
| 984 |
+
"Note: you may need to restart the kernel to use updated packages.\n"
|
| 985 |
+
]
|
| 986 |
+
}
|
| 987 |
+
],
|
| 988 |
+
"source": [
|
| 989 |
+
"pip install tabulate"
|
| 990 |
+
]
|
| 991 |
+
},
|
| 992 |
+
{
|
| 993 |
+
"cell_type": "code",
|
| 994 |
+
"execution_count": 5,
|
| 995 |
+
"id": "d6de1c39-42e2-494e-a354-9a099cbfff66",
|
| 996 |
+
"metadata": {},
|
| 997 |
+
"outputs": [],
|
| 998 |
+
"source": [
|
| 999 |
+
"import os\n",
|
| 1000 |
+
"import glob\n",
|
| 1001 |
+
"from datasets import load_dataset\n",
|
| 1002 |
+
"from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers\n",
|
| 1003 |
+
"from tqdm import tqdm\n",
|
| 1004 |
+
"from transformers import GPT2TokenizerFast\n",
|
| 1005 |
+
"\n",
|
| 1006 |
+
"# ================= 1. 全局配置 =================\n",
|
| 1007 |
+
"\n",
|
| 1008 |
+
"# 总采样大小: 5GB\n",
|
| 1009 |
+
"TOTAL_SIZE_GB = 5\n",
|
| 1010 |
+
"OUTPUT_DIR = \"./tokenizer_train_data_5gb\"\n",
|
| 1011 |
+
"\n",
|
| 1012 |
+
"# 数据集配比配置\n",
|
| 1013 |
+
"DATASET_CONFIG = [\n",
|
| 1014 |
+
" # --- 1. 代码 (70% = 3.5GB) ---\n",
|
| 1015 |
+
" # 指定使用 Swallow-Code 的 \"exp11-scor\" 子集\n",
|
| 1016 |
+
" {\n",
|
| 1017 |
+
" \"name\": \"swallow_code\",\n",
|
| 1018 |
+
" \"hf_id\": \"tokyotech-llm/swallow-code\",\n",
|
| 1019 |
+
" \"subset\": \"exp11-scor\", \n",
|
| 1020 |
+
" \"ratio\": 0.70,\n",
|
| 1021 |
+
" },\n",
|
| 1022 |
+
" \n",
|
| 1023 |
+
" # --- 2. 数学 (10% = 0.5GB) ---\n",
|
| 1024 |
+
" # Swallow-Math\n",
|
| 1025 |
+
" {\n",
|
| 1026 |
+
" \"name\": \"swallow_math\",\n",
|
| 1027 |
+
" \"hf_id\": \"tokyotech-llm/swallow-math\",\n",
|
| 1028 |
+
" \"subset\": None, # 默认配置\n",
|
| 1029 |
+
" \"ratio\": 0.10,\n",
|
| 1030 |
+
" },\n",
|
| 1031 |
+
" \n",
|
| 1032 |
+
" # --- 3. 英文通用 (15% = 0.75GB) ---\n",
|
| 1033 |
+
" # Fineweb\n",
|
| 1034 |
+
" {\n",
|
| 1035 |
+
" \"name\": \"english_fineweb\",\n",
|
| 1036 |
+
" \"hf_id\": \"HuggingFaceFW/fineweb-edu\", \n",
|
| 1037 |
+
" \"subset\": \"sample-10BT\", # 使用它的 100亿 token 采样版,足够了\n",
|
| 1038 |
+
" \"ratio\": 0.15,\n",
|
| 1039 |
+
" },\n",
|
| 1040 |
+
" \n",
|
| 1041 |
+
" # --- 4. 中文通用 (5% = 0.25GB) ---\n",
|
| 1042 |
+
" # SkyPile (高质量中文)\n",
|
| 1043 |
+
" {\n",
|
| 1044 |
+
" \"name\": \"chinese_skypile\",\n",
|
| 1045 |
+
" \"hf_id\": \"Skywork/SkyPile-150B\",\n",
|
| 1046 |
+
" \"subset\": None,\n",
|
| 1047 |
+
" \"ratio\": 0.05,\n",
|
| 1048 |
+
" }\n",
|
| 1049 |
+
"]\n",
|
| 1050 |
+
"\n",
|
| 1051 |
+
"# ================= 2. 智能文本提取 =================\n",
|
| 1052 |
+
"\n",
|
| 1053 |
+
"def extract_text_from_sample(sample):\n",
|
| 1054 |
+
" # 1. 优先查找单一文本列 (按优先级排序)\n",
|
| 1055 |
+
" # 大多数数据集都在这里能找到\n",
|
| 1056 |
+
" text_cols = [\"content\", \"text\", \"body\", \"code\", \"response\"] \n",
|
| 1057 |
+
" for col in text_cols:\n",
|
| 1058 |
+
" if col in sample and isinstance(sample[col], str) and len(sample[col]) > 0:\n",
|
| 1059 |
+
" return sample[col]\n",
|
| 1060 |
+
" \n",
|
| 1061 |
+
" # 2. 特殊处理:数学问答对\n",
|
| 1062 |
+
" if \"question\" in sample and \"answer\" in sample:\n",
|
| 1063 |
+
" q = sample.get(\"question\", \"\")\n",
|
| 1064 |
+
" a = sample.get(\"answer\", \"\")\n",
|
| 1065 |
+
" return f\"Question:\\n{q}\\n\\nAnswer:\\n{a}\"\n",
|
| 1066 |
+
" \n",
|
| 1067 |
+
" # 3. 如果没找到,返回 None,让主循环跳过,而不是盲猜\n",
|
| 1068 |
+
" return None\n",
|
| 1069 |
+
"\n",
|
| 1070 |
+
"# ================= 3. 优化后的采样逻辑 =================\n",
|
| 1071 |
+
"\n",
|
| 1072 |
+
"def sample_data():\n",
|
| 1073 |
+
" if not os.path.exists(OUTPUT_DIR):\n",
|
| 1074 |
+
" os.makedirs(OUTPUT_DIR)\n",
|
| 1075 |
+
" \n",
|
| 1076 |
+
" print(f\"=== 开始采样 | 目标总大小: {TOTAL_SIZE_GB} GB ===\")\n",
|
| 1077 |
+
" \n",
|
| 1078 |
+
" generated_files = []\n",
|
| 1079 |
+
"\n",
|
| 1080 |
+
" for config in DATASET_CONFIG:\n",
|
| 1081 |
+
" target_bytes = int(config[\"ratio\"] * TOTAL_SIZE_GB * 1024**3)\n",
|
| 1082 |
+
" output_file = os.path.join(OUTPUT_DIR, f\"{config['name']}.txt\")\n",
|
| 1083 |
+
" generated_files.append(output_file)\n",
|
| 1084 |
+
"\n",
|
| 1085 |
+
" if os.path.exists(output_file) and os.path.getsize(output_file) >= target_bytes:\n",
|
| 1086 |
+
" print(f\"[已完成] {config['name']} (跳过)\")\n",
|
| 1087 |
+
" continue\n",
|
| 1088 |
+
"\n",
|
| 1089 |
+
" print(f\"\\n 正在下载: {config['name']}\")\n",
|
| 1090 |
+
"\n",
|
| 1091 |
+
" try:\n",
|
| 1092 |
+
" ds = load_dataset(\n",
|
| 1093 |
+
" config[\"hf_id\"], \n",
|
| 1094 |
+
" name=config[\"subset\"], \n",
|
| 1095 |
+
" split=\"train\", \n",
|
| 1096 |
+
" streaming=True, \n",
|
| 1097 |
+
" trust_remote_code=True\n",
|
| 1098 |
+
" )\n",
|
| 1099 |
+
" \n",
|
| 1100 |
+
" # 【优化】调大 buffer_size 以获得更好的随机性 (内存允许的话)\n",
|
| 1101 |
+
" ds_shuffled = ds.shuffle(buffer_size=100000, seed=42)\n",
|
| 1102 |
+
" \n",
|
| 1103 |
+
" current_bytes = 0\n",
|
| 1104 |
+
" pbar = tqdm(total=target_bytes, unit='B', unit_scale=True)\n",
|
| 1105 |
+
"\n",
|
| 1106 |
+
" with open(output_file, \"w\", encoding=\"utf-8\") as f:\n",
|
| 1107 |
+
" for sample in ds_shuffled:\n",
|
| 1108 |
+
" try:\n",
|
| 1109 |
+
" text = extract_text_from_sample(sample)\n",
|
| 1110 |
+
" \n",
|
| 1111 |
+
" # 【优化】严格过滤:如果是 None 或者太短,直接跳过\n",
|
| 1112 |
+
" if text is None or len(text) < 20: \n",
|
| 1113 |
+
" continue\n",
|
| 1114 |
+
" \n",
|
| 1115 |
+
" f.write(text + \"\\n<|endoftext|>\\n\")\n",
|
| 1116 |
+
" \n",
|
| 1117 |
+
" b_size = len(text.encode('utf-8'))\n",
|
| 1118 |
+
" current_bytes += b_size\n",
|
| 1119 |
+
" pbar.update(b_size)\n",
|
| 1120 |
+
"\n",
|
| 1121 |
+
" if current_bytes >= target_bytes:\n",
|
| 1122 |
+
" break\n",
|
| 1123 |
+
" except Exception:\n",
|
| 1124 |
+
" continue \n",
|
| 1125 |
+
" \n",
|
| 1126 |
+
" pbar.close()\n",
|
| 1127 |
+
"\n",
|
| 1128 |
+
" except Exception as e:\n",
|
| 1129 |
+
" print(f\"[错误] 处理 {config['name']} 失败: {e}\")\n",
|
| 1130 |
+
" \n",
|
| 1131 |
+
" return generated_files"
|
| 1132 |
+
]
|
| 1133 |
+
},
|
| 1134 |
+
{
|
| 1135 |
+
"cell_type": "code",
|
| 1136 |
+
"execution_count": 8,
|
| 1137 |
+
"id": "3d6d7bb1-7a40-4ecb-8045-b63749452d0a",
|
| 1138 |
+
"metadata": {},
|
| 1139 |
+
"outputs": [
|
| 1140 |
+
{
|
| 1141 |
+
"name": "stdout",
|
| 1142 |
+
"output_type": "stream",
|
| 1143 |
+
"text": [
|
| 1144 |
+
"正在加载分词器: /workspace/diffusionLLM/SEDDcoder_tokenizer ...\n",
|
| 1145 |
+
"\n",
|
| 1146 |
+
"📡 正在从 HuggingFace 抽取 50MB 代码数据...\n"
|
| 1147 |
+
]
|
| 1148 |
+
},
|
| 1149 |
+
{
|
| 1150 |
+
"name": "stderr",
|
| 1151 |
+
"output_type": "stream",
|
| 1152 |
+
"text": [
|
| 1153 |
+
"\n",
|
| 1154 |
+
"Sampling: 0%| | 0.00/52.4M [00:00<?, ?B/s]\u001b[A\n",
|
| 1155 |
+
"Sampling: 0%| | 6.72k/52.4M [00:00<2:08:57, 6.78kB/s]\u001b[A\n",
|
| 1156 |
+
"Sampling: 19%|█▉ | 10.0M/52.4M [00:01<00:05, 8.15MB/s] \u001b[A\n",
|
| 1157 |
+
"Sampling: 38%|███▊ | 20.0M/52.4M [00:02<00:02, 11.6MB/s]\u001b[A\n",
|
| 1158 |
+
"Sampling: 57%|█████▋ | 29.7M/52.4M [00:02<00:01, 18.8MB/s]\u001b[A\n",
|
| 1159 |
+
"Sampling: 64%|██████▍ | 33.7M/52.4M [00:02<00:01, 14.4MB/s]\u001b[A\n",
|
| 1160 |
+
"Sampling: 76%|███████▋ | 40.0M/52.4M [00:03<00:00, 14.0MB/s]\u001b[A\n",
|
| 1161 |
+
"Sampling: 52.4MB [00:03, 13.7MB/s] \u001b[A\n"
|
| 1162 |
+
]
|
| 1163 |
+
},
|
| 1164 |
+
{
|
| 1165 |
+
"name": "stdout",
|
| 1166 |
+
"output_type": "stream",
|
| 1167 |
+
"text": [
|
| 1168 |
+
"\n",
|
| 1169 |
+
"🧮 正在计算 Token 数量 (文本长度: 52390219 字符)...\n",
|
| 1170 |
+
"\n",
|
| 1171 |
+
"========================================\n",
|
| 1172 |
+
"📊 【你的 Tokenizer 效率报告】\n",
|
| 1173 |
+
"========================================\n",
|
| 1174 |
+
"采样数据: 50.00 MB\n",
|
| 1175 |
+
"Token 数量: 14303759\n",
|
| 1176 |
+
"Bytes/Token: 3.6655\n",
|
| 1177 |
+
"----------------------------------------\n",
|
| 1178 |
+
"基于你的 21.857 GB 数据总量估算:\n",
|
| 1179 |
+
"🔥 总 Token 数: \u001b[1;32m6.40 Billion (十亿)\u001b[0m\n",
|
| 1180 |
+
"----------------------------------------\n",
|
| 1181 |
+
"💡 训练建议:\n",
|
| 1182 |
+
" 你的数据量 (6.4B) 刚好或略少。\n",
|
| 1183 |
+
" ✅ 建议跑 4-8 Epochs 以充分榨干数据价值。\n",
|
| 1184 |
+
"========================================\n"
|
| 1185 |
+
]
|
| 1186 |
+
}
|
| 1187 |
+
],
|
| 1188 |
+
"source": [
|
| 1189 |
+
"import os\n",
|
| 1190 |
+
"from datasets import load_dataset\n",
|
| 1191 |
+
"from transformers import AutoTokenizer\n",
|
| 1192 |
+
"from tqdm import tqdm\n",
|
| 1193 |
+
"\n",
|
| 1194 |
+
"# ================= 配置区域 =================\n",
|
| 1195 |
+
"# 你的分词器路径 (确保文件夹名字对)\n",
|
| 1196 |
+
"TOKENIZER_PATH = \"/workspace/diffusionLLM/SEDDcoder_tokenizer\" \n",
|
| 1197 |
+
"# 目标采样大小 (50MB 足够精准了)\n",
|
| 1198 |
+
"TARGET_SIZE_MB = 50 \n",
|
| 1199 |
+
"# 使用高质量代码数据源\n",
|
| 1200 |
+
"DATASET_ID = \"tokyotech-llm/swallow-code\"\n",
|
| 1201 |
+
"SUBSET = \"exp11-scor\"\n",
|
| 1202 |
+
"\n",
|
| 1203 |
+
"def main():\n",
|
| 1204 |
+
" # 1. 加载你的分词器\n",
|
| 1205 |
+
" if not os.path.exists(TOKENIZER_PATH):\n",
|
| 1206 |
+
" print(f\"❌ 错误: 找不到分词器路径: {TOKENIZER_PATH}\")\n",
|
| 1207 |
+
" print(\"请确认你没有把分词器文件夹也一起删了。\")\n",
|
| 1208 |
+
" return\n",
|
| 1209 |
+
"\n",
|
| 1210 |
+
" print(f\"正在加载分词器: {TOKENIZER_PATH} ...\")\n",
|
| 1211 |
+
" try:\n",
|
| 1212 |
+
" tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH)\n",
|
| 1213 |
+
" except Exception as e:\n",
|
| 1214 |
+
" print(f\"❌ 分词器加载失败: {e}\")\n",
|
| 1215 |
+
" return\n",
|
| 1216 |
+
"\n",
|
| 1217 |
+
" # 2. 流式下载 50MB 数据\n",
|
| 1218 |
+
" print(f\"\\n📡 正在从 HuggingFace 抽取 {TARGET_SIZE_MB}MB 代码数据...\")\n",
|
| 1219 |
+
" try:\n",
|
| 1220 |
+
" # streaming=True 是关键,不下载全量,只读一点点\n",
|
| 1221 |
+
" ds = load_dataset(DATASET_ID, SUBSET, split=\"train\", streaming=True)\n",
|
| 1222 |
+
" \n",
|
| 1223 |
+
" collected_text = \"\"\n",
|
| 1224 |
+
" current_bytes = 0\n",
|
| 1225 |
+
" target_bytes = TARGET_SIZE_MB * 1024 * 1024\n",
|
| 1226 |
+
" \n",
|
| 1227 |
+
" pbar = tqdm(total=target_bytes, unit='B', unit_scale=True, desc=\"Sampling\")\n",
|
| 1228 |
+
" \n",
|
| 1229 |
+
" for sample in ds:\n",
|
| 1230 |
+
" # 兼容不同的列名\n",
|
| 1231 |
+
" text = sample.get('content') or sample.get('text') or sample.get('code')\n",
|
| 1232 |
+
" if not text: continue\n",
|
| 1233 |
+
" \n",
|
| 1234 |
+
" # 计算字节大小\n",
|
| 1235 |
+
" text_bytes = len(text.encode('utf-8'))\n",
|
| 1236 |
+
" \n",
|
| 1237 |
+
" collected_text += text\n",
|
| 1238 |
+
" current_bytes += text_bytes\n",
|
| 1239 |
+
" pbar.update(text_bytes)\n",
|
| 1240 |
+
" \n",
|
| 1241 |
+
" if current_bytes >= target_bytes:\n",
|
| 1242 |
+
" break\n",
|
| 1243 |
+
" pbar.close()\n",
|
| 1244 |
+
" \n",
|
| 1245 |
+
" except Exception as e:\n",
|
| 1246 |
+
" print(f\"❌ 网络错误或数据集连接失败: {e}\")\n",
|
| 1247 |
+
" return\n",
|
| 1248 |
+
"\n",
|
| 1249 |
+
" # 3. 计算压缩率\n",
|
| 1250 |
+
" print(f\"\\n🧮 正在计算 Token 数量 (文本长度: {len(collected_text)} 字符)...\")\n",
|
| 1251 |
+
" \n",
|
| 1252 |
+
" # 编码\n",
|
| 1253 |
+
" tokens = tokenizer.encode(collected_text)\n",
|
| 1254 |
+
" num_tokens = len(tokens)\n",
|
| 1255 |
+
" \n",
|
| 1256 |
+
" # 核心公式: 字节数 / Token数\n",
|
| 1257 |
+
" ratio = current_bytes / num_tokens\n",
|
| 1258 |
+
" \n",
|
| 1259 |
+
" # 4. 输出结论\n",
|
| 1260 |
+
" print(\"\\n\" + \"=\"*40)\n",
|
| 1261 |
+
" print(f\"📊 【你的 Tokenizer 效率报告】\")\n",
|
| 1262 |
+
" print(\"=\"*40)\n",
|
| 1263 |
+
" print(f\"采样数据: {current_bytes / 1024 / 1024:.2f} MB\")\n",
|
| 1264 |
+
" print(f\"Token 数量: {num_tokens}\")\n",
|
| 1265 |
+
" print(f\"Bytes/Token: {ratio:.4f}\")\n",
|
| 1266 |
+
" print(\"-\" * 40)\n",
|
| 1267 |
+
" \n",
|
| 1268 |
+
" # 5. 为你估算 21.857GB 的总 Token 数\n",
|
| 1269 |
+
" total_data_gb = 21.857\n",
|
| 1270 |
+
" total_tokens_billion = (total_data_gb * 1024**3) / ratio / 10**9\n",
|
| 1271 |
+
" \n",
|
| 1272 |
+
" print(f\"基于你的 21.857 GB 数据总量估算:\")\n",
|
| 1273 |
+
" print(f\"🔥 总 Token 数: \\033[1;32m{total_tokens_billion:.2f} Billion (十亿)\\033[0m\")\n",
|
| 1274 |
+
" \n",
|
| 1275 |
+
" print(\"-\" * 40)\n",
|
| 1276 |
+
" print(\"💡 训练建议:\")\n",
|
| 1277 |
+
" if total_tokens_billion > 8:\n",
|
| 1278 |
+
" print(f\" 你的数据量 ({total_tokens_billion:.1f}B) >> 模型需求 (8B)。\")\n",
|
| 1279 |
+
" print(\" ✅ 建议跑 1 个 Epoch 就够了 (Chinchilla Optimal)。\")\n",
|
| 1280 |
+
" print(\" 🚀 如果想强化代码能力,最多跑 2-4 Epochs。\")\n",
|
| 1281 |
+
" else:\n",
|
| 1282 |
+
" print(f\" 你的数据量 ({total_tokens_billion:.1f}B) 刚好或略少。\")\n",
|
| 1283 |
+
" print(\" ✅ 建议跑 4-8 Epochs 以充分榨干数据价值。\")\n",
|
| 1284 |
+
" print(\"=\"*40)\n",
|
| 1285 |
+
"\n",
|
| 1286 |
+
"if __name__ == \"__main__\":\n",
|
| 1287 |
+
" main()"
|
| 1288 |
+
]
|
| 1289 |
+
},
|
| 1290 |
+
{
|
| 1291 |
+
"cell_type": "code",
|
| 1292 |
+
"execution_count": null,
|
| 1293 |
+
"id": "09d91717-61ee-4054-8ca1-39c101086248",
|
| 1294 |
+
"metadata": {},
|
| 1295 |
+
"outputs": [],
|
| 1296 |
+
"source": []
|
| 1297 |
+
}
|
| 1298 |
+
],
|
| 1299 |
+
"metadata": {
|
| 1300 |
+
"kernelspec": {
|
| 1301 |
+
"display_name": "Python 3 (ipykernel)",
|
| 1302 |
+
"language": "python",
|
| 1303 |
+
"name": "python3"
|
| 1304 |
+
},
|
| 1305 |
+
"language_info": {
|
| 1306 |
+
"codemirror_mode": {
|
| 1307 |
+
"name": "ipython",
|
| 1308 |
+
"version": 3
|
| 1309 |
+
},
|
| 1310 |
+
"file_extension": ".py",
|
| 1311 |
+
"mimetype": "text/x-python",
|
| 1312 |
+
"name": "python",
|
| 1313 |
+
"nbconvert_exporter": "python",
|
| 1314 |
+
"pygments_lexer": "ipython3",
|
| 1315 |
+
"version": "3.10.12"
|
| 1316 |
+
}
|
| 1317 |
+
},
|
| 1318 |
+
"nbformat": 4,
|
| 1319 |
+
"nbformat_minor": 5
|
| 1320 |
+
}
|
SEDDcoder_tokenizer/merges.txt
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
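merges.txt stores the learned BPE merge rules, one pair per line in priority order; together with vocab.json it fully determines the trained tokenizer. A minimal sketch for peeking at the top merges (assumes the LFS file has been pulled locally; the "#version" header is the usual tokenizers-library convention):

    # peek_merges.py -- inspect the highest-priority BPE merges
    with open("SEDDcoder_tokenizer/merges.txt", encoding="utf-8") as f:
        header = next(f)                            # typically "#version: 0.2"
        top = [next(f).rstrip() for _ in range(5)]
    print(top)  # the five merges applied first during tokenization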
SEDDcoder_tokenizer/special_tokens_map.json
ADDED
|
@@ -0,0 +1,12 @@
|
| 1 |
+
{
|
| 2 |
+
"additional_special_tokens": [
|
| 3 |
+
"<|fim_prefix|>",
|
| 4 |
+
"<|fim_middle|>",
|
| 5 |
+
"<|fim_suffix|>"
|
| 6 |
+
],
|
| 7 |
+
"bos_token": "<|endoftext|>",
|
| 8 |
+
"eos_token": "<|endoftext|>",
|
| 9 |
+
"mask_token": "<mask>",
|
| 10 |
+
"pad_token": "<pad>",
|
| 11 |
+
"unk_token": "<|endoftext|>"
|
| 12 |
+
}
|
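This map is what AutoTokenizer restores at load time; a minimal sketch for verifying it (assumes the repo is checked out locally as ./SEDDcoder_tokenizer):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("./SEDDcoder_tokenizer")
    print(tok.special_tokens_map)           # mirrors the JSON above
    print(tok.additional_special_tokens)    # ['<|fim_prefix|>', '<|fim_middle|>', '<|fim_suffix|>']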
SEDDcoder_tokenizer/tokenizer.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
SEDDcoder_tokenizer/tokenizer_config.json
ADDED
|
@@ -0,0 +1,67 @@
|
| 1 |
+
{
|
| 2 |
+
"add_prefix_space": false,
|
| 3 |
+
"added_tokens_decoder": {
|
| 4 |
+
"0": {
|
| 5 |
+
"content": "<|endoftext|>",
|
| 6 |
+
"lstrip": false,
|
| 7 |
+
"normalized": false,
|
| 8 |
+
"rstrip": false,
|
| 9 |
+
"single_word": false,
|
| 10 |
+
"special": true
|
| 11 |
+
},
|
| 12 |
+
"1": {
|
| 13 |
+
"content": "<pad>",
|
| 14 |
+
"lstrip": false,
|
| 15 |
+
"normalized": false,
|
| 16 |
+
"rstrip": false,
|
| 17 |
+
"single_word": false,
|
| 18 |
+
"special": true
|
| 19 |
+
},
|
| 20 |
+
"2": {
|
| 21 |
+
"content": "<mask>",
|
| 22 |
+
"lstrip": false,
|
| 23 |
+
"normalized": false,
|
| 24 |
+
"rstrip": false,
|
| 25 |
+
"single_word": false,
|
| 26 |
+
"special": true
|
| 27 |
+
},
|
| 28 |
+
"3": {
|
| 29 |
+
"content": "<|fim_prefix|>",
|
| 30 |
+
"lstrip": false,
|
| 31 |
+
"normalized": false,
|
| 32 |
+
"rstrip": false,
|
| 33 |
+
"single_word": false,
|
| 34 |
+
"special": true
|
| 35 |
+
},
|
| 36 |
+
"4": {
|
| 37 |
+
"content": "<|fim_middle|>",
|
| 38 |
+
"lstrip": false,
|
| 39 |
+
"normalized": false,
|
| 40 |
+
"rstrip": false,
|
| 41 |
+
"single_word": false,
|
| 42 |
+
"special": true
|
| 43 |
+
},
|
| 44 |
+
"5": {
|
| 45 |
+
"content": "<|fim_suffix|>",
|
| 46 |
+
"lstrip": false,
|
| 47 |
+
"normalized": false,
|
| 48 |
+
"rstrip": false,
|
| 49 |
+
"single_word": false,
|
| 50 |
+
"special": true
|
| 51 |
+
}
|
| 52 |
+
},
|
| 53 |
+
"additional_special_tokens": [
|
| 54 |
+
"<|fim_prefix|>",
|
| 55 |
+
"<|fim_middle|>",
|
| 56 |
+
"<|fim_suffix|>"
|
| 57 |
+
],
|
| 58 |
+
"bos_token": "<|endoftext|>",
|
| 59 |
+
"clean_up_tokenization_spaces": false,
|
| 60 |
+
"eos_token": "<|endoftext|>",
|
| 61 |
+
"extra_special_tokens": {},
|
| 62 |
+
"mask_token": "<mask>",
|
| 63 |
+
"model_max_length": 1000000000000000019884624838656,
|
| 64 |
+
"pad_token": "<pad>",
|
| 65 |
+
"tokenizer_class": "GPT2Tokenizer",
|
| 66 |
+
"unk_token": "<|endoftext|>"
|
| 67 |
+
}
|
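The FIM tokens registered here are conventionally assembled in prefix-suffix-middle (PSM) order when building infilling prompts; a hedged sketch (PSM ordering is a common convention, not something this config enforces):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("./SEDDcoder_tokenizer")
    prefix, suffix = "def add(a, b):\n    ", "\n"
    fim_prompt = f"<|fim_prefix|>{prefix}<|fim_suffix|>{suffix}<|fim_middle|>"
    ids = tok.encode(fim_prompt)  # a FIM-trained model would then generate the middle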
SEDDcoder_tokenizer/tokenizer_train_data_5gb/swallow_code.txt
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4b1cd84f8c58f90ce011914174f6bfb10fe27deb8f26df549dd2a9ce001b802f
|
| 3 |
+
size 12972724
|
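The three lines above are a Git LFS pointer, not the data itself: the actual swallow_code.txt blob (~12.4 MiB, matching the interrupted sampling run in the notebook) is fetched on git lfs pull. A throwaway parser for such pointer files:

    def parse_lfs_pointer(path: str) -> dict:
        # Each pointer line is "key value"; returns e.g. {"version": ..., "oid": ..., "size": ...}
        fields = {}
        with open(path, encoding="utf-8") as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields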
SEDDcoder_tokenizer/tokenizer——trainer.ipynb
ADDED
|
@@ -0,0 +1,1320 @@
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "code",
|
| 5 |
+
"execution_count": 1,
|
| 6 |
+
"id": "5d3c5e9a-91a4-49b4-bbff-3349bf81f2ec",
|
| 7 |
+
"metadata": {},
|
| 8 |
+
"outputs": [
|
| 9 |
+
{
|
| 10 |
+
"name": "stdout",
|
| 11 |
+
"output_type": "stream",
|
| 12 |
+
"text": [
|
| 13 |
+
"Collecting datasets\n",
|
| 14 |
+
" Downloading datasets-4.4.1-py3-none-any.whl.metadata (19 kB)\n",
|
| 15 |
+
"Collecting tokenizers\n",
|
| 16 |
+
" Downloading tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (6.8 kB)\n",
|
| 17 |
+
"Collecting transformers\n",
|
| 18 |
+
" Downloading transformers-4.57.3-py3-none-any.whl.metadata (43 kB)\n",
|
| 19 |
+
"Collecting tqdm\n",
|
| 20 |
+
" Using cached tqdm-4.67.1-py3-none-any.whl.metadata (57 kB)\n",
|
| 21 |
+
"Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from datasets) (3.20.0)\n",
|
| 22 |
+
"Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.12/dist-packages (from datasets) (2.1.2)\n",
|
| 23 |
+
"Collecting pyarrow>=21.0.0 (from datasets)\n",
|
| 24 |
+
" Downloading pyarrow-22.0.0-cp312-cp312-manylinux_2_28_x86_64.whl.metadata (3.2 kB)\n",
|
| 25 |
+
"Collecting dill<0.4.1,>=0.3.0 (from datasets)\n",
|
| 26 |
+
" Downloading dill-0.4.0-py3-none-any.whl.metadata (10 kB)\n",
|
| 27 |
+
"Collecting pandas (from datasets)\n",
|
| 28 |
+
" Downloading pandas-2.3.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl.metadata (91 kB)\n",
|
| 29 |
+
"Requirement already satisfied: requests>=2.32.2 in /usr/local/lib/python3.12/dist-packages (from datasets) (2.32.5)\n",
|
| 30 |
+
"Requirement already satisfied: httpx<1.0.0 in /usr/local/lib/python3.12/dist-packages (from datasets) (0.28.1)\n",
|
| 31 |
+
"Collecting xxhash (from datasets)\n",
|
| 32 |
+
" Downloading xxhash-3.6.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl.metadata (13 kB)\n",
|
| 33 |
+
"Collecting multiprocess<0.70.19 (from datasets)\n",
|
| 34 |
+
" Downloading multiprocess-0.70.18-py312-none-any.whl.metadata (7.5 kB)\n",
|
| 35 |
+
"Requirement already satisfied: fsspec<=2025.10.0,>=2023.1.0 in /usr/local/lib/python3.12/dist-packages (from fsspec[http]<=2025.10.0,>=2023.1.0->datasets) (2024.6.1)\n",
|
| 36 |
+
"Collecting huggingface-hub<2.0,>=0.25.0 (from datasets)\n",
|
| 37 |
+
" Using cached huggingface_hub-1.1.7-py3-none-any.whl.metadata (13 kB)\n",
|
| 38 |
+
"Requirement already satisfied: packaging in /usr/local/lib/python3.12/dist-packages (from datasets) (25.0)\n",
|
| 39 |
+
"Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.12/dist-packages (from datasets) (6.0.3)\n",
|
| 40 |
+
"Collecting aiohttp!=4.0.0a0,!=4.0.0a1 (from fsspec[http]<=2025.10.0,>=2023.1.0->datasets)\n",
|
| 41 |
+
" Downloading aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl.metadata (8.1 kB)\n",
|
| 42 |
+
"Requirement already satisfied: anyio in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->datasets) (4.11.0)\n",
|
| 43 |
+
"Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->datasets) (2025.10.5)\n",
|
| 44 |
+
"Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->datasets) (1.0.9)\n",
|
| 45 |
+
"Requirement already satisfied: idna in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->datasets) (3.10)\n",
|
| 46 |
+
"Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0.0->datasets) (0.16.0)\n",
|
| 47 |
+
"Collecting hf-xet<2.0.0,>=1.2.0 (from huggingface-hub<2.0,>=0.25.0->datasets)\n",
|
| 48 |
+
" Using cached hf_xet-1.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.9 kB)\n",
|
| 49 |
+
"Collecting shellingham (from huggingface-hub<2.0,>=0.25.0->datasets)\n",
|
| 50 |
+
" Downloading shellingham-1.5.4-py2.py3-none-any.whl.metadata (3.5 kB)\n",
|
| 51 |
+
"Collecting typer-slim (from huggingface-hub<2.0,>=0.25.0->datasets)\n",
|
| 52 |
+
" Downloading typer_slim-0.20.0-py3-none-any.whl.metadata (16 kB)\n",
|
| 53 |
+
"Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.25.0->datasets) (4.15.0)\n",
|
| 54 |
+
"Collecting huggingface-hub<2.0,>=0.25.0 (from datasets)\n",
|
| 55 |
+
" Using cached huggingface_hub-0.36.0-py3-none-any.whl.metadata (14 kB)\n",
|
| 56 |
+
"Collecting regex!=2019.12.17 (from transformers)\n",
|
| 57 |
+
" Downloading regex-2025.11.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl.metadata (40 kB)\n",
|
| 58 |
+
"Collecting safetensors>=0.4.3 (from transformers)\n",
|
| 59 |
+
" Using cached safetensors-0.7.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.1 kB)\n",
|
| 60 |
+
"Collecting aiohappyeyeballs>=2.5.0 (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.10.0,>=2023.1.0->datasets)\n",
|
| 61 |
+
" Using cached aiohappyeyeballs-2.6.1-py3-none-any.whl.metadata (5.9 kB)\n",
|
| 62 |
+
"Collecting aiosignal>=1.4.0 (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.10.0,>=2023.1.0->datasets)\n",
|
| 63 |
+
" Using cached aiosignal-1.4.0-py3-none-any.whl.metadata (3.7 kB)\n",
|
| 64 |
+
"Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.12/dist-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.10.0,>=2023.1.0->datasets) (25.4.0)\n",
|
| 65 |
+
"Collecting frozenlist>=1.1.1 (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.10.0,>=2023.1.0->datasets)\n",
|
| 66 |
+
" Downloading frozenlist-1.8.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl.metadata (20 kB)\n",
|
| 67 |
+
"Collecting multidict<7.0,>=4.5 (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.10.0,>=2023.1.0->datasets)\n",
|
| 68 |
+
" Downloading multidict-6.7.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl.metadata (5.3 kB)\n",
|
| 69 |
+
"Collecting propcache>=0.2.0 (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.10.0,>=2023.1.0->datasets)\n",
|
| 70 |
+
" Downloading propcache-0.4.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl.metadata (13 kB)\n",
|
| 71 |
+
"Collecting yarl<2.0,>=1.17.0 (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.10.0,>=2023.1.0->datasets)\n",
|
| 72 |
+
" Downloading yarl-1.22.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl.metadata (75 kB)\n",
|
| 73 |
+
"Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests>=2.32.2->datasets) (3.4.3)\n",
|
| 74 |
+
"Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests>=2.32.2->datasets) (2.5.0)\n",
|
| 75 |
+
"Requirement already satisfied: sniffio>=1.1 in /usr/local/lib/python3.12/dist-packages (from anyio->httpx<1.0.0->datasets) (1.3.1)\n",
|
| 76 |
+
"Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas->datasets) (2.9.0.post0)\n",
|
| 77 |
+
"Collecting pytz>=2020.1 (from pandas->datasets)\n",
|
| 78 |
+
" Using cached pytz-2025.2-py2.py3-none-any.whl.metadata (22 kB)\n",
|
| 79 |
+
"Collecting tzdata>=2022.7 (from pandas->datasets)\n",
|
| 80 |
+
" Using cached tzdata-2025.2-py2.py3-none-any.whl.metadata (1.4 kB)\n",
|
| 81 |
+
"Requirement already satisfied: six>=1.5 in /usr/lib/python3/dist-packages (from python-dateutil>=2.8.2->pandas->datasets) (1.16.0)\n",
|
| 82 |
+
"Collecting click>=8.0.0 (from typer-slim->huggingface-hub<2.0,>=0.25.0->datasets)\n",
|
| 83 |
+
" Downloading click-8.3.1-py3-none-any.whl.metadata (2.6 kB)\n",
|
| 84 |
+
"Downloading datasets-4.4.1-py3-none-any.whl (511 kB)\n",
|
| 85 |
+
"Downloading dill-0.4.0-py3-none-any.whl (119 kB)\n",
|
| 86 |
+
"Using cached hf_xet-1.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.3 MB)\n",
|
| 87 |
+
"Downloading multiprocess-0.70.18-py312-none-any.whl (150 kB)\n",
|
| 88 |
+
"Downloading tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.3 MB)\n",
|
| 89 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.3/3.3 MB\u001b[0m \u001b[31m33.7 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m\n",
|
| 90 |
+
"\u001b[?25hDownloading transformers-4.57.3-py3-none-any.whl (12.0 MB)\n",
|
| 91 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m12.0/12.0 MB\u001b[0m \u001b[31m28.6 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0mm0:00:01\u001b[0m:00:01\u001b[0m\n",
|
| 92 |
+
"\u001b[?25hUsing cached huggingface_hub-0.36.0-py3-none-any.whl (566 kB)\n",
|
| 93 |
+
"Using cached tqdm-4.67.1-py3-none-any.whl (78 kB)\n",
|
| 94 |
+
"Downloading aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (1.8 MB)\n",
|
| 95 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m37.1 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m\n",
|
| 96 |
+
"\u001b[?25hDownloading multidict-6.7.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (256 kB)\n",
|
| 97 |
+
"Downloading yarl-1.22.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (377 kB)\n",
|
| 98 |
+
"Using cached aiohappyeyeballs-2.6.1-py3-none-any.whl (15 kB)\n",
|
| 99 |
+
"Using cached aiosignal-1.4.0-py3-none-any.whl (7.5 kB)\n",
|
| 100 |
+
"Downloading frozenlist-1.8.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl (242 kB)\n",
|
| 101 |
+
"Downloading propcache-0.4.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (221 kB)\n",
|
| 102 |
+
"Downloading pyarrow-22.0.0-cp312-cp312-manylinux_2_28_x86_64.whl (47.7 MB)\n",
|
| 103 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m47.7/47.7 MB\u001b[0m \u001b[31m89.7 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m6m0:00:01\u001b[0m00:01\u001b[0m\n",
|
| 104 |
+
"\u001b[?25hDownloading regex-2025.11.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (803 kB)\n",
|
| 105 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m803.5/803.5 kB\u001b[0m \u001b[31m7.5 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m\n",
|
| 106 |
+
"\u001b[?25hUsing cached safetensors-0.7.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (507 kB)\n",
|
| 107 |
+
"Downloading pandas-2.3.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl (12.4 MB)\n",
|
| 108 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m12.4/12.4 MB\u001b[0m \u001b[31m46.4 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m6m0:00:01\u001b[0m\n",
|
| 109 |
+
"\u001b[?25hUsing cached pytz-2025.2-py2.py3-none-any.whl (509 kB)\n",
|
| 110 |
+
"Using cached tzdata-2025.2-py2.py3-none-any.whl (347 kB)\n",
|
| 111 |
+
"Downloading xxhash-3.6.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (193 kB)\n",
|
| 112 |
+
"Installing collected packages: pytz, xxhash, tzdata, tqdm, safetensors, regex, pyarrow, propcache, multidict, hf-xet, frozenlist, dill, aiohappyeyeballs, yarl, pandas, multiprocess, huggingface-hub, aiosignal, tokenizers, aiohttp, transformers, datasets\n",
|
| 113 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m22/22\u001b[0m [datasets]/22\u001b[0m [datasets]ers]ub]\n",
|
| 114 |
+
"\u001b[1A\u001b[2KSuccessfully installed aiohappyeyeballs-2.6.1 aiohttp-3.13.2 aiosignal-1.4.0 datasets-4.4.1 dill-0.4.0 frozenlist-1.8.0 hf-xet-1.2.0 huggingface-hub-0.36.0 multidict-6.7.0 multiprocess-0.70.18 pandas-2.3.3 propcache-0.4.1 pyarrow-22.0.0 pytz-2025.2 regex-2025.11.3 safetensors-0.7.0 tokenizers-0.22.1 tqdm-4.67.1 transformers-4.57.3 tzdata-2025.2 xxhash-3.6.0 yarl-1.22.0\n",
|
| 115 |
+
"Note: you may need to restart the kernel to use updated packages.\n"
|
| 116 |
+
]
|
| 117 |
+
}
|
| 118 |
+
],
|
| 119 |
+
"source": [
|
| 120 |
+
"pip install datasets tokenizers transformers tqdm"
|
| 121 |
+
]
|
| 122 |
+
},
|
| 123 |
+
{
|
| 124 |
+
"cell_type": "code",
|
| 125 |
+
"execution_count": 3,
|
| 126 |
+
"id": "40d6aead-8bb3-4e91-aff2-f76491a4d768",
|
| 127 |
+
"metadata": {},
|
| 128 |
+
"outputs": [
|
| 129 |
+
{
|
| 130 |
+
"name": "stderr",
|
| 131 |
+
"output_type": "stream",
|
| 132 |
+
"text": [
|
| 133 |
+
"`trust_remote_code` is not supported anymore.\n",
|
| 134 |
+
"Please check that the Hugging Face dataset 'tokyotech-llm/swallow-code' isn't based on a loading script and remove `trust_remote_code`.\n",
|
| 135 |
+
"If the dataset is based on a loading script, please ask the dataset author to remove it and convert it to a standard format like Parquet.\n"
|
| 136 |
+
]
|
| 137 |
+
},
|
| 138 |
+
{
|
| 139 |
+
"name": "stdout",
|
| 140 |
+
"output_type": "stream",
|
| 141 |
+
"text": [
|
| 142 |
+
"=== 开始采样 | 目标总大小: 5 GB ===\n",
|
| 143 |
+
"\n",
|
| 144 |
+
" 正在下载: swallow_code\n"
|
| 145 |
+
]
|
| 146 |
+
},
|
| 147 |
+
{
|
| 148 |
+
"data": {
|
| 149 |
+
"application/vnd.jupyter.widget-view+json": {
|
| 150 |
+
"model_id": "d4c7dcab1bb0400183b51dfcc1635fbc",
|
| 151 |
+
"version_major": 2,
|
| 152 |
+
"version_minor": 0
|
| 153 |
+
},
|
| 154 |
+
"text/plain": [
|
| 155 |
+
"README.md: 0.00B [00:00, ?B/s]"
|
| 156 |
+
]
|
| 157 |
+
},
|
| 158 |
+
"metadata": {},
|
| 159 |
+
"output_type": "display_data"
|
| 160 |
+
},
|
| 161 |
+
{
|
| 162 |
+
"name": "stderr",
|
| 163 |
+
"output_type": "stream",
|
| 164 |
+
"text": [
|
| 165 |
+
" 0%| | 3.61M/3.76G [00:10<2:04:51, 501kB/s] "
|
| 166 |
+
]
|
| 167 |
+
},
|
| 168 |
+
{
|
| 169 |
+
"ename": "KeyboardInterrupt",
|
| 170 |
+
"evalue": "",
|
| 171 |
+
"output_type": "error",
|
| 172 |
+
"traceback": [
|
| 173 |
+
"\u001b[31m---------------------------------------------------------------------------\u001b[39m",
|
| 174 |
+
"\u001b[31mKeyboardInterrupt\u001b[39m Traceback (most recent call last)",
|
| 175 |
+
"\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[3]\u001b[39m\u001b[32m, line 186\u001b[39m\n\u001b[32m 183\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33m 词表大小: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mfast_tokenizer.vocab_size\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m)\n\u001b[32m 185\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[34m__name__\u001b[39m == \u001b[33m\"\u001b[39m\u001b[33m__main__\u001b[39m\u001b[33m\"\u001b[39m:\n\u001b[32m--> \u001b[39m\u001b[32m186\u001b[39m files = \u001b[43msample_data\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 187\u001b[39m train_coder_tokenizer(files)\n",
|
| 176 |
+
"\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[3]\u001b[39m\u001b[32m, line 109\u001b[39m, in \u001b[36msample_data\u001b[39m\u001b[34m()\u001b[39m\n\u001b[32m 106\u001b[39m pbar = tqdm(total=target_bytes, unit=\u001b[33m'\u001b[39m\u001b[33mB\u001b[39m\u001b[33m'\u001b[39m, unit_scale=\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[32m 108\u001b[39m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mopen\u001b[39m(output_file, \u001b[33m\"\u001b[39m\u001b[33mw\u001b[39m\u001b[33m\"\u001b[39m, encoding=\u001b[33m\"\u001b[39m\u001b[33mutf-8\u001b[39m\u001b[33m\"\u001b[39m) \u001b[38;5;28;01mas\u001b[39;00m f:\n\u001b[32m--> \u001b[39m\u001b[32m109\u001b[39m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43msample\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mds_shuffled\u001b[49m\u001b[43m:\u001b[49m\n\u001b[32m 110\u001b[39m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mtry\u001b[39;49;00m\u001b[43m:\u001b[49m\n\u001b[32m 111\u001b[39m \u001b[43m \u001b[49m\u001b[43mtext\u001b[49m\u001b[43m \u001b[49m\u001b[43m=\u001b[49m\u001b[43m \u001b[49m\u001b[43mextract_text_from_sample\u001b[49m\u001b[43m(\u001b[49m\u001b[43msample\u001b[49m\u001b[43m)\u001b[49m\n",
|
| 177 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/datasets/iterable_dataset.py:2538\u001b[39m, in \u001b[36mIterableDataset.__iter__\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 2535\u001b[39m \u001b[38;5;28;01myield\u001b[39;00m formatter.format_row(pa_table)\n\u001b[32m 2536\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m\n\u001b[32m-> \u001b[39m\u001b[32m2538\u001b[39m \u001b[43m\u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mkey\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mexample\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mex_iterable\u001b[49m\u001b[43m:\u001b[49m\n\u001b[32m 2539\u001b[39m \u001b[43m \u001b[49m\u001b[38;5;66;43;03m# no need to format thanks to FormattedExamplesIterable\u001b[39;49;00m\n\u001b[32m 2540\u001b[39m \u001b[43m \u001b[49m\u001b[38;5;28;43;01myield\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mexample\u001b[49m\n",
|
| 178 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/datasets/iterable_dataset.py:1714\u001b[39m, in \u001b[36mBufferShuffledExamplesIterable.__iter__\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 1712\u001b[39m \u001b[38;5;66;03m# this is the shuffle buffer that we keep in memory\u001b[39;00m\n\u001b[32m 1713\u001b[39m mem_buffer = []\n\u001b[32m-> \u001b[39m\u001b[32m1714\u001b[39m \u001b[43m\u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mx\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mex_iterable\u001b[49m\u001b[43m:\u001b[49m\n\u001b[32m 1715\u001b[39m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mmem_buffer\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[43m==\u001b[49m\u001b[43m \u001b[49m\u001b[43mbuffer_size\u001b[49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# if the buffer is full, pick and example from it\u001b[39;49;00m\n\u001b[32m 1716\u001b[39m \u001b[43m \u001b[49m\u001b[43mi\u001b[49m\u001b[43m \u001b[49m\u001b[43m=\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mindices_iterator\u001b[49m\u001b[43m)\u001b[49m\n",
|
| 179 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/datasets/iterable_dataset.py:513\u001b[39m, in \u001b[36mRebatchedArrowExamplesIterable.__iter__\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 512\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__iter__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[32m--> \u001b[39m\u001b[32m513\u001b[39m \u001b[38;5;28;01myield from\u001b[39;00m \u001b[38;5;28mself\u001b[39m.ex_iterable\n",
|
| 180 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/datasets/iterable_dataset.py:437\u001b[39m, in \u001b[36mShuffledDataSourcesArrowExamplesIterable.__iter__\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 435\u001b[39m shard_example_idx_start = \u001b[38;5;28mself\u001b[39m._state_dict[\u001b[33m\"\u001b[39m\u001b[33mshard_example_idx\u001b[39m\u001b[33m\"\u001b[39m] \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m._state_dict \u001b[38;5;28;01melse\u001b[39;00m \u001b[32m0\u001b[39m\n\u001b[32m 436\u001b[39m shard_example_idx = \u001b[32m0\u001b[39m\n\u001b[32m--> \u001b[39m\u001b[32m437\u001b[39m \u001b[43m\u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mkey\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpa_table\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mgenerate_tables_fn\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mgen_kwags\u001b[49m\u001b[43m)\u001b[49m\u001b[43m:\u001b[49m\n\u001b[32m 438\u001b[39m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mshard_example_idx\u001b[49m\u001b[43m \u001b[49m\u001b[43m+\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mpa_table\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[43m<\u001b[49m\u001b[43m=\u001b[49m\u001b[43m \u001b[49m\u001b[43mshard_example_idx_start\u001b[49m\u001b[43m:\u001b[49m\n\u001b[32m 439\u001b[39m \u001b[43m \u001b[49m\u001b[43mshard_example_idx\u001b[49m\u001b[43m \u001b[49m\u001b[43m+\u001b[49m\u001b[43m=\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mpa_table\u001b[49m\u001b[43m)\u001b[49m\n",
|
| 181 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/datasets/packaged_modules/json/json.py:137\u001b[39m, in \u001b[36mJson._generate_tables\u001b[39m\u001b[34m(self, files)\u001b[39m\n\u001b[32m 133\u001b[39m encoding_errors = (\n\u001b[32m 134\u001b[39m \u001b[38;5;28mself\u001b[39m.config.encoding_errors \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m.config.encoding_errors \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mstrict\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 135\u001b[39m )\n\u001b[32m 136\u001b[39m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m137\u001b[39m batch = \u001b[43mf\u001b[49m\u001b[43m.\u001b[49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m.\u001b[49m\u001b[43mchunksize\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 138\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m batch:\n\u001b[32m 139\u001b[39m \u001b[38;5;28;01mbreak\u001b[39;00m\n",
|
| 182 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/datasets/utils/file_utils.py:824\u001b[39m, in \u001b[36m_add_retries_to_file_obj_read_method.<locals>.read_with_retries\u001b[39m\u001b[34m(*args, **kwargs)\u001b[39m\n\u001b[32m 822\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m retry \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[32m1\u001b[39m, max_retries + \u001b[32m1\u001b[39m):\n\u001b[32m 823\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m824\u001b[39m out = \u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 825\u001b[39m \u001b[38;5;28;01mbreak\u001b[39;00m\n\u001b[32m 826\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m CONNECTION_ERRORS_TO_RETRY \u001b[38;5;28;01mas\u001b[39;00m err:\n",
|
| 183 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/hf_file_system.py:1016\u001b[39m, in \u001b[36mHfFileSystemFile.read\u001b[39m\u001b[34m(self, length)\u001b[39m\n\u001b[32m 1014\u001b[39m \u001b[38;5;28mself\u001b[39m.loc += \u001b[38;5;28mlen\u001b[39m(out)\n\u001b[32m 1015\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m out\n\u001b[32m-> \u001b[39m\u001b[32m1016\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m.\u001b[49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlength\u001b[49m\u001b[43m)\u001b[49m\n",
|
| 184 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/fsspec/spec.py:1941\u001b[39m, in \u001b[36mAbstractBufferedFile.read\u001b[39m\u001b[34m(self, length)\u001b[39m\n\u001b[32m 1938\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m length == \u001b[32m0\u001b[39m:\n\u001b[32m 1939\u001b[39m \u001b[38;5;66;03m# don't even bother calling fetch\u001b[39;00m\n\u001b[32m 1940\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33mb\u001b[39m\u001b[33m\"\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m-> \u001b[39m\u001b[32m1941\u001b[39m out = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mcache\u001b[49m\u001b[43m.\u001b[49m\u001b[43m_fetch\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mloc\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mloc\u001b[49m\u001b[43m \u001b[49m\u001b[43m+\u001b[49m\u001b[43m \u001b[49m\u001b[43mlength\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1943\u001b[39m logger.debug(\n\u001b[32m 1944\u001b[39m \u001b[33m\"\u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[33m read: \u001b[39m\u001b[38;5;132;01m%i\u001b[39;00m\u001b[33m - \u001b[39m\u001b[38;5;132;01m%i\u001b[39;00m\u001b[33m \u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[33m\"\u001b[39m,\n\u001b[32m 1945\u001b[39m \u001b[38;5;28mself\u001b[39m,\n\u001b[32m (...)\u001b[39m\u001b[32m 1948\u001b[39m \u001b[38;5;28mself\u001b[39m.cache._log_stats(),\n\u001b[32m 1949\u001b[39m )\n\u001b[32m 1950\u001b[39m \u001b[38;5;28mself\u001b[39m.loc += \u001b[38;5;28mlen\u001b[39m(out)\n",
|
| 185 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/fsspec/caching.py:234\u001b[39m, in \u001b[36mReadAheadCache._fetch\u001b[39m\u001b[34m(self, start, end)\u001b[39m\n\u001b[32m 232\u001b[39m end = \u001b[38;5;28mmin\u001b[39m(\u001b[38;5;28mself\u001b[39m.size, end + \u001b[38;5;28mself\u001b[39m.blocksize)\n\u001b[32m 233\u001b[39m \u001b[38;5;28mself\u001b[39m.total_requested_bytes += end - start\n\u001b[32m--> \u001b[39m\u001b[32m234\u001b[39m \u001b[38;5;28mself\u001b[39m.cache = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mfetcher\u001b[49m\u001b[43m(\u001b[49m\u001b[43mstart\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mend\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;66;03m# new block replaces old\u001b[39;00m\n\u001b[32m 235\u001b[39m \u001b[38;5;28mself\u001b[39m.start = start\n\u001b[32m 236\u001b[39m \u001b[38;5;28mself\u001b[39m.end = \u001b[38;5;28mself\u001b[39m.start + \u001b[38;5;28mlen\u001b[39m(\u001b[38;5;28mself\u001b[39m.cache)\n",
|
| 186 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/hf_file_system.py:976\u001b[39m, in \u001b[36mHfFileSystemFile._fetch_range\u001b[39m\u001b[34m(self, start, end)\u001b[39m\n\u001b[32m 965\u001b[39m headers = {\n\u001b[32m 966\u001b[39m \u001b[33m\"\u001b[39m\u001b[33mrange\u001b[39m\u001b[33m\"\u001b[39m: \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mbytes=\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mstart\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m-\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mend\u001b[38;5;250m \u001b[39m-\u001b[38;5;250m \u001b[39m\u001b[32m1\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m,\n\u001b[32m 967\u001b[39m **\u001b[38;5;28mself\u001b[39m.fs._api._build_hf_headers(),\n\u001b[32m 968\u001b[39m }\n\u001b[32m 969\u001b[39m url = hf_hub_url(\n\u001b[32m 970\u001b[39m repo_id=\u001b[38;5;28mself\u001b[39m.resolved_path.repo_id,\n\u001b[32m 971\u001b[39m revision=\u001b[38;5;28mself\u001b[39m.resolved_path.revision,\n\u001b[32m (...)\u001b[39m\u001b[32m 974\u001b[39m endpoint=\u001b[38;5;28mself\u001b[39m.fs.endpoint,\n\u001b[32m 975\u001b[39m )\n\u001b[32m--> \u001b[39m\u001b[32m976\u001b[39m r = \u001b[43mhttp_backoff\u001b[49m\u001b[43m(\u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mGET\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mheaders\u001b[49m\u001b[43m=\u001b[49m\u001b[43mheaders\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m=\u001b[49m\u001b[43mconstants\u001b[49m\u001b[43m.\u001b[49m\u001b[43mHF_HUB_DOWNLOAD_TIMEOUT\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 977\u001b[39m hf_raise_for_status(r)\n\u001b[32m 978\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m r.content\n",
|
| 187 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/huggingface_hub/utils/_http.py:306\u001b[39m, in \u001b[36mhttp_backoff\u001b[39m\u001b[34m(method, url, max_retries, base_wait_time, max_wait_time, retry_on_exceptions, retry_on_status_codes, **kwargs)\u001b[39m\n\u001b[32m 303\u001b[39m kwargs[\u001b[33m\"\u001b[39m\u001b[33mdata\u001b[39m\u001b[33m\"\u001b[39m].seek(io_obj_initial_pos)\n\u001b[32m 305\u001b[39m \u001b[38;5;66;03m# Perform request and return if status_code is not in the retry list.\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m306\u001b[39m response = \u001b[43msession\u001b[49m\u001b[43m.\u001b[49m\u001b[43mrequest\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmethod\u001b[49m\u001b[43m=\u001b[49m\u001b[43mmethod\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[43m=\u001b[49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 307\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m response.status_code \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m retry_on_status_codes:\n\u001b[32m 308\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m response\n",
|
| 188 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/requests/sessions.py:589\u001b[39m, in \u001b[36mSession.request\u001b[39m\u001b[34m(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)\u001b[39m\n\u001b[32m 584\u001b[39m send_kwargs = {\n\u001b[32m 585\u001b[39m \u001b[33m\"\u001b[39m\u001b[33mtimeout\u001b[39m\u001b[33m\"\u001b[39m: timeout,\n\u001b[32m 586\u001b[39m \u001b[33m\"\u001b[39m\u001b[33mallow_redirects\u001b[39m\u001b[33m\"\u001b[39m: allow_redirects,\n\u001b[32m 587\u001b[39m }\n\u001b[32m 588\u001b[39m send_kwargs.update(settings)\n\u001b[32m--> \u001b[39m\u001b[32m589\u001b[39m resp = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43msend\u001b[49m\u001b[43m(\u001b[49m\u001b[43mprep\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43msend_kwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 591\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m resp\n",
|
| 189 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/requests/sessions.py:724\u001b[39m, in \u001b[36mSession.send\u001b[39m\u001b[34m(self, request, **kwargs)\u001b[39m\n\u001b[32m 721\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m allow_redirects:\n\u001b[32m 722\u001b[39m \u001b[38;5;66;03m# Redirect resolving generator.\u001b[39;00m\n\u001b[32m 723\u001b[39m gen = \u001b[38;5;28mself\u001b[39m.resolve_redirects(r, request, **kwargs)\n\u001b[32m--> \u001b[39m\u001b[32m724\u001b[39m history = \u001b[43m[\u001b[49m\u001b[43mresp\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mresp\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mgen\u001b[49m\u001b[43m]\u001b[49m\n\u001b[32m 725\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m 726\u001b[39m history = []\n",
|
| 190 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/requests/sessions.py:265\u001b[39m, in \u001b[36mSessionRedirectMixin.resolve_redirects\u001b[39m\u001b[34m(self, resp, req, stream, timeout, verify, cert, proxies, yield_requests, **adapter_kwargs)\u001b[39m\n\u001b[32m 263\u001b[39m \u001b[38;5;28;01myield\u001b[39;00m req\n\u001b[32m 264\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m265\u001b[39m resp = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43msend\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 266\u001b[39m \u001b[43m \u001b[49m\u001b[43mreq\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 267\u001b[39m \u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 268\u001b[39m \u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 269\u001b[39m \u001b[43m \u001b[49m\u001b[43mverify\u001b[49m\u001b[43m=\u001b[49m\u001b[43mverify\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 270\u001b[39m \u001b[43m \u001b[49m\u001b[43mcert\u001b[49m\u001b[43m=\u001b[49m\u001b[43mcert\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 271\u001b[39m \u001b[43m \u001b[49m\u001b[43mproxies\u001b[49m\u001b[43m=\u001b[49m\u001b[43mproxies\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 272\u001b[39m \u001b[43m \u001b[49m\u001b[43mallow_redirects\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[32m 273\u001b[39m \u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43madapter_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 274\u001b[39m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 276\u001b[39m extract_cookies_to_jar(\u001b[38;5;28mself\u001b[39m.cookies, prepared_request, resp.raw)\n\u001b[32m 278\u001b[39m \u001b[38;5;66;03m# extract redirect url, if any, for the next loop\u001b[39;00m\n",
|
| 191 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/requests/sessions.py:746\u001b[39m, in \u001b[36mSession.send\u001b[39m\u001b[34m(self, request, **kwargs)\u001b[39m\n\u001b[32m 743\u001b[39m \u001b[38;5;28;01mpass\u001b[39;00m\n\u001b[32m 745\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m stream:\n\u001b[32m--> \u001b[39m\u001b[32m746\u001b[39m \u001b[43mr\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcontent\u001b[49m\n\u001b[32m 748\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m r\n",
|
| 192 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/requests/models.py:902\u001b[39m, in \u001b[36mResponse.content\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 900\u001b[39m \u001b[38;5;28mself\u001b[39m._content = \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[32m 901\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m902\u001b[39m \u001b[38;5;28mself\u001b[39m._content = \u001b[33;43mb\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mjoin\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43miter_content\u001b[49m\u001b[43m(\u001b[49m\u001b[43mCONTENT_CHUNK_SIZE\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;129;01mor\u001b[39;00m \u001b[33mb\u001b[39m\u001b[33m\"\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 904\u001b[39m \u001b[38;5;28mself\u001b[39m._content_consumed = \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[32m 905\u001b[39m \u001b[38;5;66;03m# don't need to release the connection; that's been handled by urllib3\u001b[39;00m\n\u001b[32m 906\u001b[39m \u001b[38;5;66;03m# since we exhausted the data.\u001b[39;00m\n",
|
| 193 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/requests/models.py:820\u001b[39m, in \u001b[36mResponse.iter_content.<locals>.generate\u001b[39m\u001b[34m()\u001b[39m\n\u001b[32m 818\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mhasattr\u001b[39m(\u001b[38;5;28mself\u001b[39m.raw, \u001b[33m\"\u001b[39m\u001b[33mstream\u001b[39m\u001b[33m\"\u001b[39m):\n\u001b[32m 819\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m820\u001b[39m \u001b[38;5;28;01myield from\u001b[39;00m \u001b[38;5;28mself\u001b[39m.raw.stream(chunk_size, decode_content=\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[32m 821\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m ProtocolError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[32m 822\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m ChunkedEncodingError(e)\n",
|
| 194 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/urllib3/response.py:1091\u001b[39m, in \u001b[36mHTTPResponse.stream\u001b[39m\u001b[34m(self, amt, decode_content)\u001b[39m\n\u001b[32m 1089\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m 1090\u001b[39m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m is_fp_closed(\u001b[38;5;28mself\u001b[39m._fp) \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(\u001b[38;5;28mself\u001b[39m._decoded_buffer) > \u001b[32m0\u001b[39m:\n\u001b[32m-> \u001b[39m\u001b[32m1091\u001b[39m data = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[43mamt\u001b[49m\u001b[43m=\u001b[49m\u001b[43mamt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdecode_content\u001b[49m\u001b[43m=\u001b[49m\u001b[43mdecode_content\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1093\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m data:\n\u001b[32m 1094\u001b[39m \u001b[38;5;28;01myield\u001b[39;00m data\n",
|
| 195 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/urllib3/response.py:980\u001b[39m, in \u001b[36mHTTPResponse.read\u001b[39m\u001b[34m(self, amt, decode_content, cache_content)\u001b[39m\n\u001b[32m 977\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(\u001b[38;5;28mself\u001b[39m._decoded_buffer) >= amt:\n\u001b[32m 978\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._decoded_buffer.get(amt)\n\u001b[32m--> \u001b[39m\u001b[32m980\u001b[39m data = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_raw_read\u001b[49m\u001b[43m(\u001b[49m\u001b[43mamt\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 982\u001b[39m flush_decoder = amt \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mor\u001b[39;00m (amt != \u001b[32m0\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m data)\n\u001b[32m 984\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m data \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(\u001b[38;5;28mself\u001b[39m._decoded_buffer) == \u001b[32m0\u001b[39m:\n",
|
| 196 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/urllib3/response.py:904\u001b[39m, in \u001b[36mHTTPResponse._raw_read\u001b[39m\u001b[34m(self, amt, read1)\u001b[39m\n\u001b[32m 901\u001b[39m fp_closed = \u001b[38;5;28mgetattr\u001b[39m(\u001b[38;5;28mself\u001b[39m._fp, \u001b[33m\"\u001b[39m\u001b[33mclosed\u001b[39m\u001b[33m\"\u001b[39m, \u001b[38;5;28;01mFalse\u001b[39;00m)\n\u001b[32m 903\u001b[39m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mself\u001b[39m._error_catcher():\n\u001b[32m--> \u001b[39m\u001b[32m904\u001b[39m data = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_fp_read\u001b[49m\u001b[43m(\u001b[49m\u001b[43mamt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mread1\u001b[49m\u001b[43m=\u001b[49m\u001b[43mread1\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m fp_closed \u001b[38;5;28;01melse\u001b[39;00m \u001b[33mb\u001b[39m\u001b[33m\"\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 905\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m amt \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m amt != \u001b[32m0\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m data:\n\u001b[32m 906\u001b[39m \u001b[38;5;66;03m# Platform-specific: Buggy versions of Python.\u001b[39;00m\n\u001b[32m 907\u001b[39m \u001b[38;5;66;03m# Close the connection when no data is returned\u001b[39;00m\n\u001b[32m (...)\u001b[39m\u001b[32m 912\u001b[39m \u001b[38;5;66;03m# not properly close the connection in all cases. There is\u001b[39;00m\n\u001b[32m 913\u001b[39m \u001b[38;5;66;03m# no harm in redundantly calling close.\u001b[39;00m\n\u001b[32m 914\u001b[39m \u001b[38;5;28mself\u001b[39m._fp.close()\n",
|
| 197 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/local/lib/python3.12/dist-packages/urllib3/response.py:887\u001b[39m, in \u001b[36mHTTPResponse._fp_read\u001b[39m\u001b[34m(self, amt, read1)\u001b[39m\n\u001b[32m 884\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._fp.read1(amt) \u001b[38;5;28;01mif\u001b[39;00m amt \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m._fp.read1()\n\u001b[32m 885\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m 886\u001b[39m \u001b[38;5;66;03m# StringIO doesn't like amt=None\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m887\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_fp\u001b[49m\u001b[43m.\u001b[49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[43mamt\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mif\u001b[39;00m amt \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m._fp.read()\n",
|
| 198 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/lib/python3.12/http/client.py:479\u001b[39m, in \u001b[36mHTTPResponse.read\u001b[39m\u001b[34m(self, amt)\u001b[39m\n\u001b[32m 476\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m.length \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m amt > \u001b[38;5;28mself\u001b[39m.length:\n\u001b[32m 477\u001b[39m \u001b[38;5;66;03m# clip the read to the \"end of response\"\u001b[39;00m\n\u001b[32m 478\u001b[39m amt = \u001b[38;5;28mself\u001b[39m.length\n\u001b[32m--> \u001b[39m\u001b[32m479\u001b[39m s = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mfp\u001b[49m\u001b[43m.\u001b[49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[43mamt\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 480\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m s \u001b[38;5;129;01mand\u001b[39;00m amt:\n\u001b[32m 481\u001b[39m \u001b[38;5;66;03m# Ideally, we would raise IncompleteRead if the content-length\u001b[39;00m\n\u001b[32m 482\u001b[39m \u001b[38;5;66;03m# wasn't satisfied, but it might break compatibility.\u001b[39;00m\n\u001b[32m 483\u001b[39m \u001b[38;5;28mself\u001b[39m._close_conn()\n",
|
| 199 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/lib/python3.12/socket.py:707\u001b[39m, in \u001b[36mSocketIO.readinto\u001b[39m\u001b[34m(self, b)\u001b[39m\n\u001b[32m 705\u001b[39m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[32m 706\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m707\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_sock\u001b[49m\u001b[43m.\u001b[49m\u001b[43mrecv_into\u001b[49m\u001b[43m(\u001b[49m\u001b[43mb\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 708\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m timeout:\n\u001b[32m 709\u001b[39m \u001b[38;5;28mself\u001b[39m._timeout_occurred = \u001b[38;5;28;01mTrue\u001b[39;00m\n",
|
| 200 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/lib/python3.12/ssl.py:1252\u001b[39m, in \u001b[36mSSLSocket.recv_into\u001b[39m\u001b[34m(self, buffer, nbytes, flags)\u001b[39m\n\u001b[32m 1248\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m flags != \u001b[32m0\u001b[39m:\n\u001b[32m 1249\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[32m 1250\u001b[39m \u001b[33m\"\u001b[39m\u001b[33mnon-zero flags not allowed in calls to recv_into() on \u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[33m\"\u001b[39m %\n\u001b[32m 1251\u001b[39m \u001b[38;5;28mself\u001b[39m.\u001b[34m__class__\u001b[39m)\n\u001b[32m-> \u001b[39m\u001b[32m1252\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnbytes\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbuffer\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1253\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m 1254\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28msuper\u001b[39m().recv_into(buffer, nbytes, flags)\n",
|
| 201 |
+
"\u001b[36mFile \u001b[39m\u001b[32m/usr/lib/python3.12/ssl.py:1104\u001b[39m, in \u001b[36mSSLSocket.read\u001b[39m\u001b[34m(self, len, buffer)\u001b[39m\n\u001b[32m 1102\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m 1103\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m buffer \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m-> \u001b[39m\u001b[32m1104\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_sslobj\u001b[49m\u001b[43m.\u001b[49m\u001b[43mread\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbuffer\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1105\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m 1106\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._sslobj.read(\u001b[38;5;28mlen\u001b[39m)\n",
|
| 202 |
+
"\u001b[31mKeyboardInterrupt\u001b[39m: "
|
| 203 |
+
]
|
| 204 |
+
},
|
| 205 |
+
{
|
| 206 |
+
"name": "stderr",
|
| 207 |
+
"output_type": "stream",
|
| 208 |
+
"text": [
|
| 209 |
+
" 0%| | 12.9M/3.76G [00:27<2:04:33, 501kB/s]"
|
| 210 |
+
]
|
| 211 |
+
}
|
| 212 |
+
],
|
| 213 |
+
"source": [
|
| 214 |
+
"import os\n",
|
| 215 |
+
"import glob\n",
|
| 216 |
+
"from datasets import load_dataset\n",
|
| 217 |
+
"from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers\n",
|
| 218 |
+
"from tqdm import tqdm\n",
|
| 219 |
+
"from transformers import GPT2TokenizerFast\n",
|
| 220 |
+
"\n",
|
| 221 |
+
"# ================= 1. 全局配置 =================\n",
|
| 222 |
+
"\n",
|
| 223 |
+
"# 总采样大小: 5GB\n",
|
| 224 |
+
"TOTAL_SIZE_GB = 5\n",
|
| 225 |
+
"OUTPUT_DIR = \"./tokenizer_train_data_5gb\"\n",
|
| 226 |
+
"\n",
|
| 227 |
+
"# 数据集配比配置\n",
|
| 228 |
+
"DATASET_CONFIG = [\n",
|
| 229 |
+
" # --- 1. 代码 (70% = 3.5GB) ---\n",
|
| 230 |
+
" # 指定使用 Swallow-Code 的 \"exp11-scor\" 子集\n",
|
| 231 |
+
" {\n",
|
| 232 |
+
" \"name\": \"swallow_code\",\n",
|
| 233 |
+
" \"hf_id\": \"tokyotech-llm/swallow-code\",\n",
|
| 234 |
+
" \"subset\": \"exp11-scor\", \n",
|
| 235 |
+
" \"ratio\": 0.70,\n",
|
| 236 |
+
" },\n",
|
| 237 |
+
" \n",
|
| 238 |
+
" # --- 2. 数学 (10% = 0.5GB) ---\n",
|
| 239 |
+
" # Swallow-Math\n",
|
| 240 |
+
" {\n",
|
| 241 |
+
" \"name\": \"swallow_math\",\n",
|
| 242 |
+
" \"hf_id\": \"tokyotech-llm/swallow-math\",\n",
|
| 243 |
+
" \"subset\": None, # 默认配置\n",
|
| 244 |
+
" \"ratio\": 0.10,\n",
|
| 245 |
+
" },\n",
|
| 246 |
+
" \n",
|
| 247 |
+
" # --- 3. 英文通用 (15% = 0.75GB) ---\n",
|
| 248 |
+
" # Fineweb\n",
|
| 249 |
+
" {\n",
|
| 250 |
+
" \"name\": \"english_fineweb\",\n",
|
| 251 |
+
" \"hf_id\": \"HuggingFaceFW/fineweb-edu\", \n",
|
| 252 |
+
" \"subset\": \"sample-10BT\", # 使用它的 100亿 token 采样版,足够了\n",
|
| 253 |
+
" \"ratio\": 0.15,\n",
|
| 254 |
+
" },\n",
|
| 255 |
+
" \n",
|
| 256 |
+
" # --- 4. 中文通用 (5% = 0.25GB) ---\n",
|
| 257 |
+
" # SkyPile (高质量中文)\n",
|
| 258 |
+
" {\n",
|
| 259 |
+
" \"name\": \"chinese_skypile\",\n",
|
| 260 |
+
" \"hf_id\": \"Skywork/SkyPile-150B\",\n",
|
| 261 |
+
" \"subset\": None,\n",
|
| 262 |
+
" \"ratio\": 0.05,\n",
|
| 263 |
+
" }\n",
|
| 264 |
+
"]\n",
|
| 265 |
+
"\n",
|
| 266 |
+
"# ================= 2. 智能文本提取 =================\n",
|
| 267 |
+
"\n",
|
| 268 |
+
"def extract_text_from_sample(sample):\n",
|
| 269 |
+
" # 1. 优先查找单一文本列 (按优先级排序)\n",
|
| 270 |
+
" # 大多数数据集都在这里能找到\n",
|
| 271 |
+
" text_cols = [\"content\", \"text\", \"body\", \"code\", \"response\"] \n",
|
| 272 |
+
" for col in text_cols:\n",
|
| 273 |
+
" if col in sample and isinstance(sample[col], str) and len(sample[col]) > 0:\n",
|
| 274 |
+
" return sample[col]\n",
|
| 275 |
+
" \n",
|
| 276 |
+
" # 2. 特殊处理:数学问答对\n",
|
| 277 |
+
" if \"question\" in sample and \"answer\" in sample:\n",
|
| 278 |
+
" q = sample.get(\"question\", \"\")\n",
|
| 279 |
+
" a = sample.get(\"answer\", \"\")\n",
|
| 280 |
+
" return f\"Question:\\n{q}\\n\\nAnswer:\\n{a}\"\n",
|
| 281 |
+
" \n",
|
| 282 |
+
" # 3. 如果没找到,返回 None,让主循环跳过,而不是盲猜\n",
|
| 283 |
+
" return None\n",
|
| 284 |
+
"\n",
|
| 285 |
+
"# ================= 3. 优化后的采样逻辑 =================\n",
|
| 286 |
+
"\n",
|
| 287 |
+
"def sample_data():\n",
|
| 288 |
+
" if not os.path.exists(OUTPUT_DIR):\n",
|
| 289 |
+
" os.makedirs(OUTPUT_DIR)\n",
|
| 290 |
+
" \n",
|
| 291 |
+
" print(f\"=== 开始采样 | 目标总大小: {TOTAL_SIZE_GB} GB ===\")\n",
|
| 292 |
+
" \n",
|
| 293 |
+
" generated_files = []\n",
|
| 294 |
+
"\n",
|
| 295 |
+
" for config in DATASET_CONFIG:\n",
|
| 296 |
+
" target_bytes = int(config[\"ratio\"] * TOTAL_SIZE_GB * 1024**3)\n",
|
| 297 |
+
" output_file = os.path.join(OUTPUT_DIR, f\"{config['name']}.txt\")\n",
|
| 298 |
+
" generated_files.append(output_file)\n",
|
| 299 |
+
"\n",
|
| 300 |
+
" if os.path.exists(output_file) and os.path.getsize(output_file) >= target_bytes:\n",
|
| 301 |
+
" print(f\"[已完成] {config['name']} (跳过)\")\n",
|
| 302 |
+
" continue\n",
|
| 303 |
+
"\n",
|
| 304 |
+
" print(f\"\\n 正在下载: {config['name']}\")\n",
|
| 305 |
+
"\n",
|
| 306 |
+
" try:\n",
|
| 307 |
+
" ds = load_dataset(\n",
|
| 308 |
+
" config[\"hf_id\"], \n",
|
| 309 |
+
" name=config[\"subset\"], \n",
|
| 310 |
+
" split=\"train\", \n",
|
| 311 |
+
" streaming=True, \n",
|
| 312 |
+
" trust_remote_code=True\n",
|
| 313 |
+
" )\n",
|
| 314 |
+
" \n",
|
| 315 |
+
" # 【优化】调大 buffer_size 以获得更好的随机性 (内存允许的话)\n",
|
| 316 |
+
" ds_shuffled = ds.shuffle(buffer_size=100000, seed=42)\n",
|
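| |
+
"            # Note: shuffling a streaming dataset is approximate; it only randomizes within the in-memory buffer\n",
|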
| 317 |
+
" \n",
|
| 318 |
+
" current_bytes = 0\n",
|
| 319 |
+
" pbar = tqdm(total=target_bytes, unit='B', unit_scale=True)\n",
|
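| |
+
"            # tqdm tracks progress toward this dataset's byte quota\n",
|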
| 320 |
+
"\n",
|
| 321 |
+
" with open(output_file, \"w\", encoding=\"utf-8\") as f:\n",
|
| 322 |
+
" for sample in ds_shuffled:\n",
|
| 323 |
+
" try:\n",
|
| 324 |
+
" text = extract_text_from_sample(sample)\n",
|
| 325 |
+
" \n",
|
| 326 |
+
" # 【优化】严格过滤:如果是 None 或者太短,直接跳过\n",
|
| 327 |
+
" if text is None or len(text) < 20: \n",
|
| 328 |
+
" continue\n",
|
| 329 |
+
" \n",
|
| 330 |
+
" f.write(text + \"\\n<|endoftext|>\\n\")\n",
|
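| |
+
"                        # Append an explicit document separator so document boundaries survive in the corpus\n",
|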
| 331 |
+
" \n",
|
| 332 |
+
" b_size = len(text.encode('utf-8'))\n",
|
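| |
+
"                        # Count UTF-8 bytes (not characters) against the byte target\n",
|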
| 333 |
+
" current_bytes += b_size\n",
|
| 334 |
+
" pbar.update(b_size)\n",
|
| 335 |
+
"\n",
|
| 336 |
+
" if current_bytes >= target_bytes:\n",
|
| 337 |
+
" break\n",
|
| 338 |
+
" except Exception:\n",
|
| 339 |
+
" continue \n",
|
| 340 |
+
" \n",
|
| 341 |
+
" pbar.close()\n",
|
| 342 |
+
"\n",
|
| 343 |
+
" except Exception as e:\n",
|
| 344 |
+
" print(f\"[错误] 处理 {config['name']} 失败: {e}\")\n",
|
| 345 |
+
" \n",
|
| 346 |
+
" return generated_files\n",
|
| 347 |
+
"\n",
|
| 348 |
+
"# ================= 4. 优化后的训练逻辑 =================\n",
|
| 349 |
+
"\n",
|
| 350 |
+
"def train_coder_tokenizer(files):\n",
|
| 351 |
+
" print(\"\\n🔨 === 开始训练 Coder Tokenizer (BPE) ===\")\n",
|
| 352 |
+
"\n",
|
| 353 |
+
" valid_files = [f for f in files if os.path.exists(f) and os.path.getsize(f) > 0]\n",
|
| 354 |
+
" if not valid_files:\n",
|
| 355 |
+
" print(\"没有有效的数据文件,终止训练。\")\n",
|
| 356 |
+
" return\n",
|
| 357 |
+
"\n",
|
| 358 |
+
" tokenizer = Tokenizer(models.BPE())\n",
|
| 359 |
+
" tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)\n",
|
| 360 |
+
" tokenizer.decoder = decoders.ByteLevel()\n",
|
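| |
+
"    # Byte-level BPE can represent any byte sequence, so decoding is lossless for all UTF-8 input\n",
|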
| 361 |
+
"\n",
|
| 362 |
+
" special_tokens = [\n",
|
| 363 |
+
" \"<|endoftext|>\",\n",
|
| 364 |
+
" \"<pad>\",\n",
|
| 365 |
+
" \"<mask>\",\n",
|
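| |
+
"        # FIM (fill-in-the-middle) markers, following the common code-infilling convention\n",
|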
| 366 |
+
" \"<|fim_prefix|>\",\n",
|
| 367 |
+
" \"<|fim_middle|>\",\n",
|
| 368 |
+
" \"<|fim_suffix|>\"\n",
|
| 369 |
+
" ]\n",
|
| 370 |
+
"\n",
|
| 371 |
+
" trainer = trainers.BpeTrainer(\n",
|
| 372 |
+
" vocab_size=49152,\n",
|
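| |
+
"        # 49152 = 48 * 1024: a multiple of 64 (GPU-friendly) and a common coder-model vocab size\n",
|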
| 373 |
+
" min_frequency=2,\n",
|
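| |
+
"        # A pair must occur at least twice before it can become a merge, filtering one-off noise\n",
|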
| 374 |
+
" special_tokens=special_tokens,\n",
|
| 375 |
+
" initial_alphabet=pre_tokenizers.ByteLevel.alphabet()\n",
|
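| |
+
"        # Seed the vocab with all 256 byte-level symbols so nothing is ever out-of-vocabulary\n",
|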
| 376 |
+
" )\n",
|
| 377 |
+
"\n",
|
| 378 |
+
" tokenizer.train(valid_files, trainer=trainer)\n",
|
| 379 |
+
"\n",
|
| 380 |
+
" save_path = \"./SEDDcoder_tokenizer\"\n",
|
| 381 |
+
" if not os.path.exists(save_path):\n",
|
| 382 |
+
" os.makedirs(save_path)\n",
|
| 383 |
+
"\n",
|
| 384 |
+
" fast_tokenizer = GPT2TokenizerFast(\n",
|
| 385 |
+
" tokenizer_object=tokenizer,\n",
|
| 386 |
+
" bos_token=\"<|endoftext|>\",\n",
|
| 387 |
+
" eos_token=\"<|endoftext|>\", \n",
|
| 388 |
+
" unk_token=\"<|endoftext|>\",\n",
|
| 389 |
+
" mask_token=\"<mask>\",\n",
|
| 390 |
+
" pad_token=\"<pad>\",\n",
|
| 391 |
+
" additional_special_tokens=[\"<|fim_prefix|>\", \"<|fim_middle|>\", \"<|fim_suffix|>\"]\n",
|
| 392 |
+
" )\n",
|
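| |
+
"    # Wrapping the raw tokenizer in GPT2TokenizerFast lets save_pretrained() write vocab.json,\n",
|
| |
+
"    # merges.txt, tokenizer.json plus the config and special-tokens map in one call\n",
|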
| 393 |
+
"\n",
|
| 394 |
+
" fast_tokenizer.save_pretrained(save_path)\n",
|
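| |
+
"    # Hypothetical post-save sanity check (not executed here):\n",
|
| |
+
"    # reloaded = GPT2TokenizerFast.from_pretrained(save_path)\n",
|
| |
+
"    # assert reloaded.decode(reloaded.encode(\"def f(): pass\")) == \"def f(): pass\"\n",
|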
| 395 |
+
" print(f\"\\n模型已保存至: {save_path}\")\n",
|
| 396 |
+
" print(f\" 词表大小: {fast_tokenizer.vocab_size}\")\n",
|
| 397 |
+
"\n",
|
| 398 |
+
"if __name__ == \"__main__\":\n",
|
| 399 |
+
" files = sample_data()\n",
|
| 400 |
+
" train_coder_tokenizer(files)"
|
| 401 |
+
]
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"cell_type": "code",
|
| 405 |
+
"execution_count": 2,
|
| 406 |
+
"id": "d0bf613b-7344-4736-b142-e0b8461dfbd6",
|
| 407 |
+
"metadata": {},
|
| 408 |
+
"outputs": [
|
| 409 |
+
{
|
| 410 |
+
"name": "stdout",
|
| 411 |
+
"output_type": "stream",
|
| 412 |
+
"text": [
|
| 413 |
+
"Collecting hf_transfer\n",
|
| 414 |
+
" Downloading hf_transfer-0.1.9-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.7 kB)\n",
|
| 415 |
+
"Downloading hf_transfer-0.1.9-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.6 MB)\n",
|
| 416 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.6/3.6 MB\u001b[0m \u001b[31m29.1 MB/s\u001b[0m \u001b[33m0:00:00\u001b[0m\n",
|
| 417 |
+
"\u001b[?25hInstalling collected packages: hf_transfer\n",
|
| 418 |
+
"Successfully installed hf_transfer-0.1.9\n",
|
| 419 |
+
"Note: you may need to restart the kernel to use updated packages.\n"
|
| 420 |
+
]
|
| 421 |
+
}
|
| 422 |
+
],
|
| 423 |
+
"source": [
|
| 424 |
+
"pip install hf_transfer"
|
| 425 |
+
]
|
| 426 |
+
},
|
| 427 |
+
{
|
| 428 |
+
"cell_type": "code",
|
| 429 |
+
"execution_count": 7,
|
| 430 |
+
"id": "59fb07a4-1f0e-4ea8-8e71-90226bc33de2",
|
| 431 |
+
"metadata": {},
|
| 432 |
+
"outputs": [
|
| 433 |
+
{
|
| 434 |
+
"name": "stderr",
|
| 435 |
+
"output_type": "stream",
|
| 436 |
+
"text": [
|
| 437 |
+
"<>:10: SyntaxWarning: invalid escape sequence '\\s'\n",
|
| 438 |
+
"<>:10: SyntaxWarning: invalid escape sequence '\\s'\n",
|
| 439 |
+
"/tmp/ipykernel_3132/3547788621.py:10: SyntaxWarning: invalid escape sequence '\\s'\n",
|
| 440 |
+
" TEST_TEXT = \"\"\"def calculate_loss(y_true, y_pred):\n"
|
| 441 |
+
]
|
| 442 |
+
},
|
| 443 |
+
{
|
| 444 |
+
"name": "stdout",
|
| 445 |
+
"output_type": "stream",
|
| 446 |
+
"text": [
|
| 447 |
+
"正在加载分词器...\n",
|
| 448 |
+
"✅ 加载成功!词表大小: 49152\n",
|
| 449 |
+
"\n",
|
| 450 |
+
"==================== 1. 肉眼观察切分效果 ====================\n",
|
| 451 |
+
"前 5000 个 Token 展示 (注意 Ġ 代表空格):\n",
|
| 452 |
+
"[ 0] def\n",
|
| 453 |
+
"[ 1] Ġcalculate\n",
|
| 454 |
+
"[ 2] _\n",
|
| 455 |
+
"[ 3] loss\n",
|
| 456 |
+
"[ 4] (\n",
|
| 457 |
+
"[ 5] y\n",
|
| 458 |
+
"[ 6] _\n",
|
| 459 |
+
"[ 7] true\n",
|
| 460 |
+
"[ 8] ,\n",
|
| 461 |
+
"[ 9] Ġy\n",
|
| 462 |
+
"[10] _\n",
|
| 463 |
+
"[11] pred\n",
|
| 464 |
+
"[12] ):\n",
|
| 465 |
+
"[13] Ċ\n",
|
| 466 |
+
"[14] ĠĠĠ\n",
|
| 467 |
+
"[15] Ġ#\n",
|
| 468 |
+
"[16] ĠThis\n",
|
| 469 |
+
"[17] Ġis\n",
|
| 470 |
+
"[18] Ġa\n",
|
| 471 |
+
"[19] Ġcomment\n",
|
| 472 |
+
"[20] Ċ\n",
|
| 473 |
+
"[21] ĠĠĠ\n",
|
| 474 |
+
"[22] Ġ#\n",
|
| 475 |
+
"[23] Ġ\n",
|
| 476 |
+
"[24] 计ç®Ĺ\n",
|
| 477 |
+
"[25] åĿĩ\n",
|
| 478 |
+
"[26] æĸ¹\n",
|
| 479 |
+
"[27] 误差\n",
|
| 480 |
+
"[28] Ċ\n",
|
| 481 |
+
"[29] ĠĠĠ\n",
|
| 482 |
+
"[30] Ġerror\n",
|
| 483 |
+
"[31] Ġ=\n",
|
| 484 |
+
"[32] Ġy\n",
|
| 485 |
+
"[33] _\n",
|
| 486 |
+
"[34] true\n",
|
| 487 |
+
"[35] Ġ-\n",
|
| 488 |
+
"[36] Ġy\n",
|
| 489 |
+
"[37] _\n",
|
| 490 |
+
"[38] pred\n",
|
| 491 |
+
"[39] Ċ\n",
|
| 492 |
+
"[40] ĠĠĠ\n",
|
| 493 |
+
"[41] Ġmse\n",
|
| 494 |
+
"[42] Ġ=\n",
|
| 495 |
+
"[43] Ġnp\n",
|
| 496 |
+
"[44] .\n",
|
| 497 |
+
"[45] mean\n",
|
| 498 |
+
"[46] (\n",
|
| 499 |
+
"[47] error\n",
|
| 500 |
+
"[48] Ġ**\n",
|
| 501 |
+
"[49] Ġ2\n",
|
| 502 |
+
"[50] )\n",
|
| 503 |
+
"[51] Ċ\n",
|
| 504 |
+
"[52] ĠĠĠ\n",
|
| 505 |
+
"[53] Ġreturn\n",
|
| 506 |
+
"[54] Ġmse\n",
|
| 507 |
+
"[55] Ċ\n",
|
| 508 |
+
"[56] Ċ\n",
|
| 509 |
+
"[57] #\n",
|
| 510 |
+
"[58] ĠMath\n",
|
| 511 |
+
"[59] Ġformula\n",
|
| 512 |
+
"[60] :\n",
|
| 513 |
+
"[61] Ċ\n",
|
| 514 |
+
"[62] #\n",
|
| 515 |
+
"[63] Ġf\n",
|
| 516 |
+
"[64] (\n",
|
| 517 |
+
"[65] x\n",
|
| 518 |
+
"[66] )\n",
|
| 519 |
+
"[67] Ġ=\n",
|
| 520 |
+
"[68] Ġ\n",
|
| 521 |
+
"[69] Č\n",
|
| 522 |
+
"[70] rac\n",
|
| 523 |
+
"[71] {\n",
|
| 524 |
+
"[72] 1\n",
|
| 525 |
+
"[73] }{\\\n",
|
| 526 |
+
"[74] sqrt\n",
|
| 527 |
+
"[75] {\n",
|
| 528 |
+
"[76] 2\n",
|
| 529 |
+
"[77] \\\n",
|
| 530 |
+
"[78] pi\n",
|
| 531 |
+
"[79] \\\n",
|
| 532 |
+
"[80] sigma\n",
|
| 533 |
+
"[81] ^\n",
|
| 534 |
+
"[82] 2\n",
|
| 535 |
+
"[83] }}\n",
|
| 536 |
+
"[84] Ġe\n",
|
| 537 |
+
"[85] ^{-\n",
|
| 538 |
+
"[86] Č\n",
|
| 539 |
+
"[87] rac\n",
|
| 540 |
+
"[88] {(\n",
|
| 541 |
+
"[89] x\n",
|
| 542 |
+
"[90] -\\\n",
|
| 543 |
+
"[91] mu\n",
|
| 544 |
+
"[92] )^\n",
|
| 545 |
+
"[93] 2\n",
|
| 546 |
+
"[94] }{\n",
|
| 547 |
+
"[95] 2\n",
|
| 548 |
+
"[96] \\\n",
|
| 549 |
+
"[97] sigma\n",
|
| 550 |
+
"[98] ^\n",
|
| 551 |
+
"[99] 2\n",
|
| 552 |
+
"[100] }}\n",
|
| 553 |
+
"[101] Ċ\n",
|
| 554 |
+
"\n",
|
| 555 |
+
"==================== 2. 无损还原测试 ====================\n",
|
| 556 |
+
"✅ 完美还原!输入与输出完全一致。\n",
|
| 557 |
+
"\n",
|
| 558 |
+
"==================== 3. 关键能力核查 ====================\n",
|
| 559 |
+
"测试 4个空格缩进: ['ĠĠĠĠ']\n",
|
| 560 |
+
" -> ⭐ 优秀!4个空格被识别为 1 个 token。\n",
|
| 561 |
+
"\n",
|
| 562 |
+
"测试关键字完整性:\n",
|
| 563 |
+
" ' def' -> ['Ġdef']\n",
|
| 564 |
+
" ' return' -> ['Ġreturn']\n",
|
| 565 |
+
" ' import' -> ['Ġimport']\n",
|
| 566 |
+
" ' class' -> ['Ġclass']\n",
|
| 567 |
+
" ' numpy' -> ['Ġnumpy']\n",
|
| 568 |
+
"\n",
|
| 569 |
+
"测试中文: '计算均方误差' -> ['计ç®Ĺ', 'åĿĩ', 'æĸ¹', '误差']\n",
|
| 570 |
+
" -> ⭐ 还可以,常用中文词汇被合并了。\n",
|
| 571 |
+
"\n",
|
| 572 |
+
"==================== 4. 压缩率 PK (vs GPT-2 标准) ====================\n",
|
| 573 |
+
"同一段文本 Token 数量:\n",
|
| 574 |
+
" GPT-2 原版: 131\n",
|
| 575 |
+
" 你的分词器: 102\n",
|
| 576 |
+
"\n",
|
| 577 |
+
"🚀 结论: 你的分词器比 GPT-2 节省了 22.14% 的长度!\n",
|
| 578 |
+
" (对于代码模型,这意味着能读更长的上下文,训练速度更快)\n"
|
| 579 |
+
]
|
| 580 |
+
}
|
| 581 |
+
],
|
| 582 |
+
"source": [
|
| 583 |
+
"from transformers import AutoTokenizer\n",
|
| 584 |
+
"import json\n",
|
| 585 |
+
"\n",
|
| 586 |
+
"# ================= 配置 =================\n",
|
| 587 |
+
"# 指向你刚才保存的文件夹路径\n",
|
| 588 |
+
"TOKENIZER_PATH = \"./SEDDcoder_tokenizer\"\n",
|
| 589 |
+
"\n",
|
| 590 |
+
"# 准备测试文本:包含 Python 代码、数学公式、中文\n",
|
| 591 |
+
"# 这正好对应你训练的三个主要领域\n",
|
| 592 |
+
"TEST_TEXT = \"\"\"def calculate_loss(y_true, y_pred):\n",
|
| 593 |
+
" # This is a comment\n",
|
| 594 |
+
" # 计算均方误差\n",
|
| 595 |
+
" error = y_true - y_pred\n",
|
| 596 |
+
" mse = np.mean(error ** 2)\n",
|
| 597 |
+
" return mse\n",
|
| 598 |
+
"\n",
|
| 599 |
+
"# Math formula:\n",
|
| 600 |
+
"# f(x) = \\frac{1}{\\sqrt{2\\pi\\sigma^2}} e^{-\\frac{(x-\\mu)^2}{2\\sigma^2}}\n",
|
| 601 |
+
"\"\"\"\n",
|
| 602 |
+
"\n",
|
| 603 |
+
"# ================= 开始测试 =================\n",
|
| 604 |
+
"\n",
|
| 605 |
+
"def print_separator(title):\n",
|
| 606 |
+
" print(f\"\\n{'='*20} {title} {'='*20}\")\n",
|
| 607 |
+
"\n",
|
| 608 |
+
"try:\n",
|
| 609 |
+
" # 1. 加载分词器\n",
|
| 610 |
+
" print(\"正在加载分词器...\")\n",
|
| 611 |
+
" tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH)\n",
|
| 612 |
+
" print(f\"✅ 加载成功!词表大小: {tokenizer.vocab_size}\")\n",
|
| 613 |
+
"\n",
|
| 614 |
+
" # 2. 基础编码测试 (Eye Test)\n",
|
| 615 |
+
" print_separator(\"1. 肉眼观察切分效果\")\n",
|
| 616 |
+
" tokens = tokenizer.tokenize(TEST_TEXT)\n",
|
| 617 |
+
" \n",
|
| 618 |
+
" # 打印前 30 个 token 看看\n",
|
| 619 |
+
" print(\"前 5000 个 Token 展示 (注意 Ġ 代表空格):\")\n",
|
| 620 |
+
" for i, t in enumerate(tokens[:5000]):\n",
|
| 621 |
+
" print(f\"[{i:2d}] {t}\")\n",
|
| 622 |
+
"\n",
|
| 623 |
+
" # 3. 还原测试 (Round-Trip)\n",
|
| 624 |
+
" print_separator(\"2. 无损还原测试\")\n",
|
| 625 |
+
" token_ids = tokenizer.encode(TEST_TEXT)\n",
|
| 626 |
+
" decoded_text = tokenizer.decode(token_ids)\n",
|
| 627 |
+
" \n",
|
| 628 |
+
" if decoded_text == TEST_TEXT:\n",
|
| 629 |
+
" print(\"✅ 完美还原!输入与输出完全一致。\")\n",
|
| 630 |
+
" else:\n",
|
| 631 |
+
" print(\"❌ 还原失败!分词器可能丢失了信息。\")\n",
|
| 632 |
+
" # 调试用:\n",
|
| 633 |
+
" # print(\"原文:\", repr(TEST_TEXT))\n",
|
| 634 |
+
" # print(\"还原:\", repr(decoded_text))\n",
|
| 635 |
+
"\n",
|
| 636 |
+
" # 4. 关键能力检查\n",
|
| 637 |
+
" print_separator(\"3. 关键能力核查\")\n",
|
| 638 |
+
" \n",
|
| 639 |
+
" # Check 1: 缩进是否被压缩?\n",
|
| 640 |
+
" # 在 Python 中,4个空格非常常见。好的 Coder Tokenizer 会把它变成 1 个 token\n",
|
| 641 |
+
" indent_text = \" \" # 4个空格\n",
|
| 642 |
+
" indent_tokens = tokenizer.tokenize(indent_text)\n",
|
| 643 |
+
" print(f\"测试 4个空格缩进: {indent_tokens}\")\n",
|
| 644 |
+
" if len(indent_tokens) == 1:\n",
|
| 645 |
+
" print(\" -> ⭐ 优秀!4个空格被识别为 1 个 token。\")\n",
|
| 646 |
+
" else:\n",
|
| 647 |
+
" print(f\" -> 普通。4个空格被切成了 {len(indent_tokens)} 个 token。\")\n",
|
| 648 |
+
"\n",
|
| 649 |
+
" # Check 2: 常用关键字是否完整?\n",
|
| 650 |
+
" keywords = [\"def\", \"return\", \"import\", \"class\", \"numpy\"]\n",
|
| 651 |
+
" print(\"\\n测试关键字完整性:\")\n",
|
| 652 |
+
" for kw in keywords:\n",
|
| 653 |
+
" # 注意:要在前面加个空格,模拟代码中的 \" def\" 场景\n",
|
| 654 |
+
" t = tokenizer.tokenize(\" \" + kw)\n",
|
| 655 |
+
" print(f\" ' {kw}' -> {t}\")\n",
|
| 656 |
+
"\n",
|
| 657 |
+
" # Check 3: 中文是否乱码?\n",
|
| 658 |
+
" chinese_sample = \"计算均方误差\"\n",
|
| 659 |
+
" cn_tokens = tokenizer.tokenize(chinese_sample)\n",
|
| 660 |
+
" print(f\"\\n测试中文: '{chinese_sample}' -> {cn_tokens}\")\n",
|
| 661 |
+
" if len(cn_tokens) <= 4:\n",
|
| 662 |
+
" print(\" -> ⭐ 还可以,常用中文词汇被合并了。\")\n",
|
| 663 |
+
" else:\n",
|
| 664 |
+
" print(\" -> 略碎,可能是因为中文语料占比只有 5%。\")\n",
|
| 665 |
+
"\n",
|
| 666 |
+
" # 5. 简单压缩率对比 (vs GPT-2)\n",
|
| 667 |
+
" print_separator(\"4. 压缩率 PK (vs GPT-2 标准)\")\n",
|
| 668 |
+
" from transformers import GPT2Tokenizer\n",
|
| 669 |
+
" try:\n",
|
| 670 |
+
" gpt2_tok = GPT2Tokenizer.from_pretrained(\"gpt2\")\n",
|
| 671 |
+
" len_gpt2 = len(gpt2_tok.encode(TEST_TEXT))\n",
|
| 672 |
+
" len_yours = len(token_ids)\n",
|
| 673 |
+
" \n",
|
| 674 |
+
" print(f\"同一段文本 Token 数量:\")\n",
|
| 675 |
+
" print(f\" GPT-2 原版: {len_gpt2}\")\n",
|
| 676 |
+
" print(f\" 你的分词器: {len_yours}\")\n",
|
| 677 |
+
" \n",
|
| 678 |
+
" improvement = (len_gpt2 - len_yours) / len_gpt2 * 100\n",
|
| 679 |
+
" print(f\"\\n🚀 结论: 你的分词器比 GPT-2 节省了 {improvement:.2f}% 的长度!\")\n",
|
| 680 |
+
" if improvement > 10:\n",
|
| 681 |
+
" print(\" (对于代码模型,这意味着能读更长的上下文,训练速度更快)\")\n",
|
| 682 |
+
" \n",
|
| 683 |
+
" except Exception as e:\n",
|
| 684 |
+
" print(\"跳过对比 (未安装或无法连接 GPT-2):\", e)\n",
|
| 685 |
+
"\n",
|
| 686 |
+
"except Exception as e:\n",
|
| 687 |
+
" print(f\"\\n❌ 发生错误: {e}\")\n",
|
| 688 |
+
" print(\"请检查路径是否正确,或是否安装了 tokenizers 库。\")"
|
| 689 |
+
]
|
| 690 |
+
},
|
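A quick decoder ring for the token dumps above: byte-level BPE maps every raw byte to a printable stand-in character, so 'Ġ' is the space byte (0x20), 'Ċ' is newline (0x0A), and 'Č' is a form feed (0x0C). The 'Č' + 'rac' pair in the dump is the `\f` escape hiding inside the non-raw TEST_TEXT literal: in a regular Python string, `\frac` begins with a form feed. Below is a minimal sketch of inverting the display mapping; it is not part of the notebook and assumes the internal `bytes_to_unicode` helper from transformers' GPT-2 tokenizer module is importable.

# Sketch: undo byte-level BPE's byte-to-printable-char display mapping
from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode

char2byte = {c: b for b, c in bytes_to_unicode().items()}

def token_to_bytes(token: str) -> bytes:
    # Map each display character of a token back to its raw byte
    return bytes(char2byte[ch] for ch in token)

print(token_to_bytes("Ġdef"))                 # b' def'
print(token_to_bytes("Ċ"))                    # b'\n'
print(token_to_bytes("è®¡").decode("utf-8"))  # '计': three byte-tokens form one CJK char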
| 691 |
+
{
|
| 692 |
+
"cell_type": "code",
|
| 693 |
+
"execution_count": 8,
|
| 694 |
+
"id": "ac1a0ffb-f6fa-4c65-9589-fc06b1e09e0c",
|
| 695 |
+
"metadata": {},
|
| 696 |
+
"outputs": [
|
| 697 |
+
{
|
| 698 |
+
"name": "stdout",
|
| 699 |
+
"output_type": "stream",
|
| 700 |
+
"text": [
|
| 701 |
+
"正在加载 GPT-2 官方分词器...\n",
|
| 702 |
+
"\n",
|
| 703 |
+
"=== GPT-2 (官方) 的切分结果 ===\n",
|
| 704 |
+
"['def', 'Ġcalculate', '_', 'loss', '(', 'y', '_', 'true', ',', 'Ġy', '_', 'pred', '):', 'Ċ', 'Ġ', 'Ġ', 'Ġ', 'Ġ#', 'Ġè', '®', '¡', 'ç', '®', 'Ĺ', 'å', 'Ŀ', 'ĩ', 'æĸ¹', 'è¯', '¯', 'å·', '®', 'Ċ', 'Ġ', 'Ġ', 'Ġ', 'Ġreturn', 'Ġm', 'se']\n",
|
| 705 |
+
"Token 数量: 39\n",
|
| 706 |
+
"\n",
|
| 707 |
+
"=== 你的 Coder 分词器 的切分结果 ===\n",
|
| 708 |
+
"['def', 'Ġcalculate', '_', 'loss', '(', 'y', '_', 'true', ',', 'Ġy', '_', 'pred', '):', 'Ċ', 'ĠĠĠ', 'Ġ#', 'Ġ', '计ç®Ĺ', 'åĿĩ', 'æĸ¹', '误差', 'Ċ', 'ĠĠĠ', 'Ġreturn', 'Ġmse']\n",
|
| 709 |
+
"Token 数量: 25\n"
|
| 710 |
+
]
|
| 711 |
+
}
|
| 712 |
+
],
|
| 713 |
+
"source": [
|
| 714 |
+
"from transformers import AutoTokenizer\n",
|
| 715 |
+
"\n",
|
| 716 |
+
"# 1. 加载官方 GPT-2 分词器\n",
|
| 717 |
+
"print(\"正在加载 GPT-2 官方分词器...\")\n",
|
| 718 |
+
"gpt2_tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n",
|
| 719 |
+
"\n",
|
| 720 |
+
"# 2. 同样的测试文本\n",
|
| 721 |
+
"text = \"\"\"def calculate_loss(y_true, y_pred):\n",
|
| 722 |
+
" # 计算均方误差\n",
|
| 723 |
+
" return mse\"\"\"\n",
|
| 724 |
+
"\n",
|
| 725 |
+
"# 3. 打印 GPT-2 的切分结果\n",
|
| 726 |
+
"print(\"\\n=== GPT-2 (官方) 的切分结果 ===\")\n",
|
| 727 |
+
"gpt2_tokens = gpt2_tokenizer.tokenize(text)\n",
|
| 728 |
+
"print(gpt2_tokens)\n",
|
| 729 |
+
"print(f\"Token 数量: {len(gpt2_tokens)}\")\n",
|
| 730 |
+
"\n",
|
| 731 |
+
"# -------------------------------------------\n",
|
| 732 |
+
"# 如果你想对比你自己的 (假设路径在 ./SEDDcoder_tokenizer)\n",
|
| 733 |
+
"# -------------------------------------------\n",
|
| 734 |
+
"try:\n",
|
| 735 |
+
" print(\"\\n=== 你的 Coder 分词器 的切分结果 ===\")\n",
|
| 736 |
+
" my_tokenizer = AutoTokenizer.from_pretrained(\"./SEDDcoder_tokenizer\")\n",
|
| 737 |
+
" my_tokens = my_tokenizer.tokenize(text)\n",
|
| 738 |
+
" print(my_tokens)\n",
|
| 739 |
+
" print(f\"Token 数量: {len(my_tokens)}\")\n",
|
| 740 |
+
"except:\n",
|
| 741 |
+
" pass"
|
| 742 |
+
]
|
| 743 |
+
},
|
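The keyword checks in these cells tokenize " def" rather than "def" for a reason worth spelling out: in byte-level BPE the leading space is part of the token, so a keyword at the start of a line and the same keyword after other code hit different vocab entries. A minimal sketch (not from the notebook) with the same GPT-2 tokenizer:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
print(tok.tokenize("def"))   # ['def']  - no leading space
print(tok.tokenize(" def"))  # ['Ġdef'] - the space is fused into the token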
| 744 |
+
{
|
| 745 |
+
"cell_type": "code",
|
| 746 |
+
"execution_count": 11,
|
| 747 |
+
"id": "679befca-f975-499d-92f7-c3b5ea8d58b7",
|
| 748 |
+
"metadata": {},
|
| 749 |
+
"outputs": [
|
| 750 |
+
{
|
| 751 |
+
"name": "stdout",
|
| 752 |
+
"output_type": "stream",
|
| 753 |
+
"text": [
|
| 754 |
+
"【原始文本】: kwargs\n",
|
| 755 |
+
"\n",
|
| 756 |
+
"--- A. 查看切分并手动还原 ---\n",
|
| 757 |
+
"【切分结果】: ['kwargs']\n",
|
| 758 |
+
"【还原结果】: kwargs\n",
|
| 759 |
+
"\n",
|
| 760 |
+
"--- B. 模拟模型输入输出 (ID 层面) ---\n",
|
| 761 |
+
"【Token IDs】: [2251]\n",
|
| 762 |
+
"【解码结果】: kwargs\n",
|
| 763 |
+
"\n",
|
| 764 |
+
"--- 验证 ---\n",
|
| 765 |
+
"✅ 成功!能够完美还原(无损)。\n"
|
| 766 |
+
]
|
| 767 |
+
}
|
| 768 |
+
],
|
| 769 |
+
"source": [
|
| 770 |
+
"from transformers import AutoTokenizer\n",
|
| 771 |
+
"\n",
|
| 772 |
+
"# 1. 加载你训练好的分词器\n",
|
| 773 |
+
"tokenizer = AutoTokenizer.from_pretrained(\"./SEDDcoder_tokenizer\")\n",
|
| 774 |
+
"\n",
|
| 775 |
+
"# 2. 准备一段测试文本 (包含代码、中文、英文)\n",
|
| 776 |
+
"text = \"kwargs\"\n",
|
| 777 |
+
"print(f\"【原始文本】: {text}\")\n",
|
| 778 |
+
"\n",
|
| 779 |
+
"# ================= 方法 A: 仅在字符串层面操作 =================\n",
|
| 780 |
+
"print(\"\\n--- A. 查看切分并手动还原 ---\")\n",
|
| 781 |
+
"\n",
|
| 782 |
+
"# 第一步:切分 (Text -> Tokens)\n",
|
| 783 |
+
"# 这一步让你看到它是怎么“下刀”的\n",
|
| 784 |
+
"tokens = tokenizer.tokenize(text)\n",
|
| 785 |
+
"print(f\"【切分结果】: {tokens}\")\n",
|
| 786 |
+
"# 你会看到类似: ['def', 'Ġcalculate', '_', 'loss', '(', '计ç®', ... ]\n",
|
| 787 |
+
"\n",
|
| 788 |
+
"# 第二步:还原 (Tokens -> Text)\n",
|
| 789 |
+
"# 这一步把 list 里的怪符号合并回正常字符串\n",
|
| 790 |
+
"restored_text_from_tokens = tokenizer.convert_tokens_to_string(tokens)\n",
|
| 791 |
+
"print(f\"【还原结果】: {restored_text_from_tokens}\")\n",
|
| 792 |
+
"\n",
|
| 793 |
+
"\n",
|
| 794 |
+
"# ================= 方法 B: 模拟模型真实工作流程 (推荐) =================\n",
|
| 795 |
+
"print(\"\\n--- B. 模拟模型输入输出 (ID 层面) ---\")\n",
|
| 796 |
+
"\n",
|
| 797 |
+
"# 第一步:编码 (Text -> IDs)\n",
|
| 798 |
+
"# 模型看不懂字符串,只看懂数字。这是真正喂给 GPT/Llama 的东西\n",
|
| 799 |
+
"input_ids = tokenizer.encode(text)\n",
|
| 800 |
+
"print(f\"【Token IDs】: {input_ids}\")\n",
|
| 801 |
+
"\n",
|
| 802 |
+
"# 第二步:解码 (IDs -> Text)\n",
|
| 803 |
+
"# 模型输出数字后,我们用这个方法变回人类语言\n",
|
| 804 |
+
"decoded_text = tokenizer.decode(input_ids)\n",
|
| 805 |
+
"print(f\"【解码结果】: {decoded_text}\")\n",
|
| 806 |
+
"\n",
|
| 807 |
+
"# ================= 验证 =================\n",
|
| 808 |
+
"print(\"\\n--- 验证 ---\")\n",
|
| 809 |
+
"if text == decoded_text:\n",
|
| 810 |
+
" print(\"✅ 成功!能够完美还原(无损)。\")\n",
|
| 811 |
+
"else:\n",
|
| 812 |
+
" print(\"❌ 失败!还原后与原文不一致。\")"
|
| 813 |
+
]
|
| 814 |
+
},
|
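One caveat on method B above: depending on the tokenizer's configuration, encode() may prepend or append special tokens such as <|endoftext|>, which would break an exact string comparison even when the content round-trips fine. A hedged sketch using the standard transformers flags that control this, with the same tokenizer object:

ids = tokenizer.encode("kwargs", add_special_tokens=False)  # content ids only
text = tokenizer.decode(ids, skip_special_tokens=True)      # strip any markers on the way back
assert text == "kwargs"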
| 815 |
+
{
|
| 816 |
+
"cell_type": "code",
|
| 817 |
+
"execution_count": 23,
|
| 818 |
+
"id": "ffa8642a-c6e0-47fd-b92a-285eb3e9b9f0",
|
| 819 |
+
"metadata": {},
|
| 820 |
+
"outputs": [
|
| 821 |
+
{
|
| 822 |
+
"name": "stdout",
|
| 823 |
+
"output_type": "stream",
|
| 824 |
+
"text": [
|
| 825 |
+
"正在加载分词器...\n",
|
| 826 |
+
"1. ./SEDDcoder_tokenizer\n",
|
| 827 |
+
"2. gpt2\n",
|
| 828 |
+
"✅ 加载完成!\n",
|
| 829 |
+
"\n",
|
| 830 |
+
"\n",
|
| 831 |
+
"📊 === 总体性能排行榜 (Token 越少越好) ===\n",
|
| 832 |
+
"+---------------------------------+------------+------------+---------------+\n",
|
| 833 |
+
"| 场景 | 你的长度 | GPT2长度 | 🚀 节省空间 |\n",
|
| 834 |
+
"+=================================+============+============+===============+\n",
|
| 835 |
+
"| 1. 真实Python函数 (含Docstring) | 159 | 253 | 37.2% |\n",
|
| 836 |
+
"+---------------------------------+------------+------------+---------------+\n",
|
| 837 |
+
"\n",
|
| 838 |
+
"\n",
|
| 839 |
+
"🔍 === 详细切分对比 ===\n",
|
| 840 |
+
"\n",
|
| 841 |
+
"============================================================\n",
|
| 842 |
+
"【场景 1】: 1. 真实Python函数 (含Docstring)\n",
|
| 843 |
+
"------------------------------------------------------------\n",
|
| 844 |
+
"[GPT-2] (253 tokens):\n",
|
| 845 |
+
"['def', 'Ġget', '_', 'n', 'ested', '_', 'attribute', '(', 'obj', ':', 'Ġobject', ',', 'Ġdot', '_', 'expr', ':', 'Ġstr', ')', 'Ġ->', 'Ġobject', ':', 'Ċ', 'Ġ', 'Ġ', 'Ġ', 'Ġ\"\"\"', 'Ċ', 'Ġ', 'Ġ', 'Ġ', 'ĠRet', 'rie', 'ves', 'Ġa', 'Ġnested', 'Ġattribute', 'Ġfrom', 'Ġan', 'Ġobject', 'Ġusing', 'Ġa', 'Ġdot', 'Ġnotation', 'Ġstring', '.', 'ĊĊ', 'Ġ', 'Ġ', 'Ġ', 'ĠAr'] ... (只显示前50个)\n",
|
| 846 |
+
"\n",
|
| 847 |
+
"[Yours] (159 tokens):\n",
|
| 848 |
+
"['def', 'Ġget', '_', 'nested', '_', 'attribute', '(', 'obj', ':', 'Ġobject', ',', 'Ġdot', '_', 'expr', ':', 'Ġstr', ')', 'Ġ->', 'Ġobject', ':', 'Ċ', 'ĠĠĠ', 'Ġ\"\"\"', 'Ċ', 'ĠĠĠ', 'ĠRetrieves', 'Ġa', 'Ġnested', 'Ġattribute', 'Ġfrom', 'Ġan', 'Ġobject', 'Ġusing', 'Ġa', 'Ġdot', 'Ġnotation', 'Ġstring', '.', 'Ċ', 'Ċ', 'ĠĠĠ', 'ĠArgs', ':', 'Ċ', 'ĠĠĠĠĠĠĠ', 'Ġobj', 'Ġ(', 'object', '):', 'ĠThe'] ... (只显示前50个)\n",
|
| 849 |
+
"------------------------------------------------------------\n",
|
| 850 |
+
">>> 结论: 你比 GPT-2 少用了 94 个 Token (节省 37.2%)\n",
|
| 851 |
+
"\n"
|
| 852 |
+
]
|
| 853 |
+
}
|
| 854 |
+
],
|
| 855 |
+
"source": [
|
| 856 |
+
"import pandas as pd\n",
|
| 857 |
+
"from transformers import AutoTokenizer\n",
|
| 858 |
+
"\n",
|
| 859 |
+
"# ================= 1. 配置路径 =================\n",
|
| 860 |
+
"MY_TOKENIZER_PATH = \"./SEDDcoder_tokenizer\"\n",
|
| 861 |
+
"COMPARE_MODEL = \"gpt2\"\n",
|
| 862 |
+
"\n",
|
| 863 |
+
"print(f\"正在加载分词器...\\n1. {MY_TOKENIZER_PATH}\\n2. {COMPARE_MODEL}\")\n",
|
| 864 |
+
"try:\n",
|
| 865 |
+
" my_tok = AutoTokenizer.from_pretrained(MY_TOKENIZER_PATH)\n",
|
| 866 |
+
" gpt2_tok = AutoTokenizer.from_pretrained(COMPARE_MODEL)\n",
|
| 867 |
+
" print(\"✅ 加载完成!\\n\")\n",
|
| 868 |
+
"except Exception as e:\n",
|
| 869 |
+
" print(f\"❌ 加载失败: {e}\")\n",
|
| 870 |
+
" exit()\n",
|
| 871 |
+
"\n",
|
| 872 |
+
"# ================= 2. 定义测试用例 (已修复引号和缩进) =================\n",
|
| 873 |
+
"test_cases = [\n",
|
| 874 |
+
" {\n",
|
| 875 |
+
" \"场景\": \"1. 真实Python函数 (含Docstring)\",\n",
|
| 876 |
+
" # 注意:这里使用了三个单引号 ''' 来包裹整个字符串\n",
|
| 877 |
+
" # 这样内部的 \"\"\" 和换行符就不会报错了\n",
|
| 878 |
+
" \"文本\": '''def get_nested_attribute(obj: object, dot_expr: str) -> object:\n",
|
| 879 |
+
" \"\"\"\n",
|
| 880 |
+
" Retrieves a nested attribute from an object using a dot notation string.\n",
|
| 881 |
+
"\n",
|
| 882 |
+
" Args:\n",
|
| 883 |
+
" obj (object): The object to retrieve the attribute from.\n",
|
| 884 |
+
" dot_expr (str): The dot notation string representing the attribute path.\n",
|
| 885 |
+
"\n",
|
| 886 |
+
" Returns:\n",
|
| 887 |
+
" object: The retrieved attribute value.\n",
|
| 888 |
+
" \"\"\"\n",
|
| 889 |
+
" for component in dot_expr.split(\".\"):\n",
|
| 890 |
+
" component = component.strip()\n",
|
| 891 |
+
" if not component:\n",
|
| 892 |
+
" continue\n",
|
| 893 |
+
" try:\n",
|
| 894 |
+
" obj = getattr(obj, component)\n",
|
| 895 |
+
" except AttributeError:\n",
|
| 896 |
+
" raise ValueError(f\"Attribute '{component}' does not exist\")\n",
|
| 897 |
+
" return obj'''\n",
|
| 898 |
+
" },\n",
|
| 899 |
+
"]\n",
|
| 900 |
+
"\n",
|
| 901 |
+
"# ================= 3. 执行对比逻辑 =================\n",
|
| 902 |
+
"results = []\n",
|
| 903 |
+
"detailed_logs = []\n",
|
| 904 |
+
"\n",
|
| 905 |
+
"for idx, case in enumerate(test_cases):\n",
|
| 906 |
+
" text = case[\"文本\"]\n",
|
| 907 |
+
" scenario = case[\"场景\"]\n",
|
| 908 |
+
" \n",
|
| 909 |
+
" # 1. 切分\n",
|
| 910 |
+
" tokens_mine = my_tok.tokenize(text)\n",
|
| 911 |
+
" tokens_gpt2 = gpt2_tok.tokenize(text)\n",
|
| 912 |
+
" \n",
|
| 913 |
+
" # 2. 计数\n",
|
| 914 |
+
" count_mine = len(tokens_mine)\n",
|
| 915 |
+
" count_gpt2 = len(tokens_gpt2)\n",
|
| 916 |
+
" \n",
|
| 917 |
+
" # 3. 计算节省率\n",
|
| 918 |
+
" if count_gpt2 > 0:\n",
|
| 919 |
+
" saving = (count_gpt2 - count_mine) / count_gpt2 * 100\n",
|
| 920 |
+
" else:\n",
|
| 921 |
+
" saving = 0.0\n",
|
| 922 |
+
" \n",
|
| 923 |
+
" # 存入摘要表\n",
|
| 924 |
+
" results.append({\n",
|
| 925 |
+
" \"场景\": scenario,\n",
|
| 926 |
+
" \"你的长度\": count_mine,\n",
|
| 927 |
+
" \"GPT2长度\": count_gpt2,\n",
|
| 928 |
+
" \"🚀 节省空间\": f\"{saving:.1f}%\"\n",
|
| 929 |
+
" })\n",
|
| 930 |
+
" \n",
|
| 931 |
+
" # 存入详细对比日志\n",
|
| 932 |
+
" detailed_logs.append(f\"\"\"\n",
|
| 933 |
+
"{'='*60}\n",
|
| 934 |
+
"【场景 {idx+1}】: {scenario}\n",
|
| 935 |
+
"------------------------------------------------------------\n",
|
| 936 |
+
"[GPT-2] ({count_gpt2} tokens):\n",
|
| 937 |
+
"{tokens_gpt2[:50]} ... (只显示前50个)\n",
|
| 938 |
+
"\n",
|
| 939 |
+
"[Yours] ({count_mine} tokens):\n",
|
| 940 |
+
"{tokens_mine[:50]} ... (只显示前50个)\n",
|
| 941 |
+
"------------------------------------------------------------\n",
|
| 942 |
+
">>> 结论: 你比 GPT-2 少用了 {count_gpt2 - count_mine} 个 Token (节省 {saving:.1f}%)\n",
|
| 943 |
+
"\"\"\")\n",
|
| 944 |
+
"\n",
|
| 945 |
+
"# ================= 4. 输出结果 =================\n",
|
| 946 |
+
"df = pd.DataFrame(results)\n",
|
| 947 |
+
"print(\"\\n📊 === 总体性能排行榜 (Token 越少越好) ===\")\n",
|
| 948 |
+
"# 尝试使用 tabulate 打印美观表格,如果没有安装则默认打印\n",
|
| 949 |
+
"try:\n",
|
| 950 |
+
" print(df.to_markdown(index=False, tablefmt=\"grid\"))\n",
|
| 951 |
+
"except:\n",
|
| 952 |
+
" print(df)\n",
|
| 953 |
+
"\n",
|
| 954 |
+
"print(\"\\n\\n🔍 === 详细切分对比 ===\")\n",
|
| 955 |
+
"for log in detailed_logs:\n",
|
| 956 |
+
" print(log)"
|
| 957 |
+
]
|
| 958 |
+
},
|
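The saving rate printed above is (count_gpt2 - count_mine) / count_gpt2. A one-off sanity check of the reported figure (a sketch, not part of the notebook):

count_gpt2, count_mine = 253, 159
saving = (count_gpt2 - count_mine) / count_gpt2 * 100
print(f"{saving:.1f}%")  # 37.2%, matching the leaderboard row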
| 959 |
+
{
|
| 960 |
+
"cell_type": "code",
|
| 961 |
+
"execution_count": 16,
|
| 962 |
+
"id": "33319993-5a0b-4506-ba05-f0530bde8063",
|
| 963 |
+
"metadata": {},
|
| 964 |
+
"outputs": [
|
| 965 |
+
{
|
| 966 |
+
"name": "stderr",
|
| 967 |
+
"output_type": "stream",
|
| 968 |
+
"text": [
|
| 969 |
+
"huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
|
| 970 |
+
"To disable this warning, you can either:\n",
|
| 971 |
+
"\t- Avoid using `tokenizers` before the fork if possible\n",
|
| 972 |
+
"\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
|
| 973 |
+
]
|
| 974 |
+
},
|
| 975 |
+
{
|
| 976 |
+
"name": "stdout",
|
| 977 |
+
"output_type": "stream",
|
| 978 |
+
"text": [
|
| 979 |
+
"Collecting tabulate\n",
|
| 980 |
+
" Downloading tabulate-0.9.0-py3-none-any.whl.metadata (34 kB)\n",
|
| 981 |
+
"Downloading tabulate-0.9.0-py3-none-any.whl (35 kB)\n",
|
| 982 |
+
"Installing collected packages: tabulate\n",
|
| 983 |
+
"Successfully installed tabulate-0.9.0\n",
|
| 984 |
+
"Note: you may need to restart the kernel to use updated packages.\n"
|
| 985 |
+
]
|
| 986 |
+
}
|
| 987 |
+
],
|
| 988 |
+
"source": [
|
| 989 |
+
"pip install tabulate"
|
| 990 |
+
]
|
| 991 |
+
},
|
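The stderr warning in the cell above is the Rust tokenizers library detecting a fork after it has already used thread parallelism. Following the warning's own suggestion, a common fix (minimal sketch) is to set the environment variable at the top of the notebook, before tokenizers is first used:

import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"  # must run before tokenizers does any work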
| 992 |
+
{
|
| 993 |
+
"cell_type": "code",
|
| 994 |
+
"execution_count": 5,
|
| 995 |
+
"id": "d6de1c39-42e2-494e-a354-9a099cbfff66",
|
| 996 |
+
"metadata": {},
|
| 997 |
+
"outputs": [],
|
| 998 |
+
"source": [
|
| 999 |
+
"import os\n",
|
| 1000 |
+
"import glob\n",
|
| 1001 |
+
"from datasets import load_dataset\n",
|
| 1002 |
+
"from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers\n",
|
| 1003 |
+
"from tqdm import tqdm\n",
|
| 1004 |
+
"from transformers import GPT2TokenizerFast\n",
|
| 1005 |
+
"\n",
|
| 1006 |
+
"# ================= 1. 全局配置 =================\n",
|
| 1007 |
+
"\n",
|
| 1008 |
+
"# 总采样大小: 5GB\n",
|
| 1009 |
+
"TOTAL_SIZE_GB = 5\n",
|
| 1010 |
+
"OUTPUT_DIR = \"./tokenizer_train_data_5gb\"\n",
|
| 1011 |
+
"\n",
|
| 1012 |
+
"# 数据集配比配置\n",
|
| 1013 |
+
"DATASET_CONFIG = [\n",
|
| 1014 |
+
" # --- 1. 代码 (70% = 3.5GB) ---\n",
|
| 1015 |
+
" # 指定使用 Swallow-Code 的 \"exp11-scor\" 子集\n",
|
| 1016 |
+
" {\n",
|
| 1017 |
+
" \"name\": \"swallow_code\",\n",
|
| 1018 |
+
" \"hf_id\": \"tokyotech-llm/swallow-code\",\n",
|
| 1019 |
+
" \"subset\": \"exp11-scor\", \n",
|
| 1020 |
+
" \"ratio\": 0.70,\n",
|
| 1021 |
+
" },\n",
|
| 1022 |
+
" \n",
|
| 1023 |
+
" # --- 2. 数学 (10% = 0.5GB) ---\n",
|
| 1024 |
+
" # Swallow-Math\n",
|
| 1025 |
+
" {\n",
|
| 1026 |
+
" \"name\": \"swallow_math\",\n",
|
| 1027 |
+
" \"hf_id\": \"tokyotech-llm/swallow-math\",\n",
|
| 1028 |
+
" \"subset\": None, # 默认配置\n",
|
| 1029 |
+
" \"ratio\": 0.10,\n",
|
| 1030 |
+
" },\n",
|
| 1031 |
+
" \n",
|
| 1032 |
+
" # --- 3. 英文通用 (15% = 0.75GB) ---\n",
|
| 1033 |
+
" # Fineweb\n",
|
| 1034 |
+
" {\n",
|
| 1035 |
+
" \"name\": \"english_fineweb\",\n",
|
| 1036 |
+
" \"hf_id\": \"HuggingFaceFW/fineweb-edu\", \n",
|
| 1037 |
+
" \"subset\": \"sample-10BT\", # 使用它的 100亿 token 采样版,足够了\n",
|
| 1038 |
+
" \"ratio\": 0.15,\n",
|
| 1039 |
+
" },\n",
|
| 1040 |
+
" \n",
|
| 1041 |
+
" # --- 4. 中文通用 (5% = 0.25GB) ---\n",
|
| 1042 |
+
" # SkyPile (高质量中文)\n",
|
| 1043 |
+
" {\n",
|
| 1044 |
+
" \"name\": \"chinese_skypile\",\n",
|
| 1045 |
+
" \"hf_id\": \"Skywork/SkyPile-150B\",\n",
|
| 1046 |
+
" \"subset\": None,\n",
|
| 1047 |
+
" \"ratio\": 0.05,\n",
|
| 1048 |
+
" }\n",
|
| 1049 |
+
"]\n",
|
| 1050 |
+
"\n",
|
| 1051 |
+
"# ================= 2. 智能文本提取 =================\n",
|
| 1052 |
+
"\n",
|
| 1053 |
+
"def extract_text_from_sample(sample):\n",
|
| 1054 |
+
" # 1. 优先查找单一文本列 (按优先级排序)\n",
|
| 1055 |
+
" # 大多数数据集都在这里能找到\n",
|
| 1056 |
+
" text_cols = [\"content\", \"text\", \"body\", \"code\", \"response\"] \n",
|
| 1057 |
+
" for col in text_cols:\n",
|
| 1058 |
+
" if col in sample and isinstance(sample[col], str) and len(sample[col]) > 0:\n",
|
| 1059 |
+
" return sample[col]\n",
|
| 1060 |
+
" \n",
|
| 1061 |
+
" # 2. 特殊处理:数学问答对\n",
|
| 1062 |
+
" if \"question\" in sample and \"answer\" in sample:\n",
|
| 1063 |
+
" q = sample.get(\"question\", \"\")\n",
|
| 1064 |
+
" a = sample.get(\"answer\", \"\")\n",
|
| 1065 |
+
" return f\"Question:\\n{q}\\n\\nAnswer:\\n{a}\"\n",
|
| 1066 |
+
" \n",
|
| 1067 |
+
" # 3. 如果没找到,返回 None,让主循环跳过,而不是盲猜\n",
|
| 1068 |
+
" return None\n",
|
| 1069 |
+
"\n",
|
| 1070 |
+
"# ================= 3. 优化后的采样逻辑 =================\n",
|
| 1071 |
+
"\n",
|
| 1072 |
+
"def sample_data():\n",
|
| 1073 |
+
" if not os.path.exists(OUTPUT_DIR):\n",
|
| 1074 |
+
" os.makedirs(OUTPUT_DIR)\n",
|
| 1075 |
+
" \n",
|
| 1076 |
+
" print(f\"=== 开始采样 | 目标总大小: {TOTAL_SIZE_GB} GB ===\")\n",
|
| 1077 |
+
" \n",
|
| 1078 |
+
" generated_files = []\n",
|
| 1079 |
+
"\n",
|
| 1080 |
+
" for config in DATASET_CONFIG:\n",
|
| 1081 |
+
" target_bytes = int(config[\"ratio\"] * TOTAL_SIZE_GB * 1024**3)\n",
|
| 1082 |
+
" output_file = os.path.join(OUTPUT_DIR, f\"{config['name']}.txt\")\n",
|
| 1083 |
+
" generated_files.append(output_file)\n",
|
| 1084 |
+
"\n",
|
| 1085 |
+
" if os.path.exists(output_file) and os.path.getsize(output_file) >= target_bytes:\n",
|
| 1086 |
+
" print(f\"[已完成] {config['name']} (跳过)\")\n",
|
| 1087 |
+
" continue\n",
|
| 1088 |
+
"\n",
|
| 1089 |
+
" print(f\"\\n 正在下载: {config['name']}\")\n",
|
| 1090 |
+
"\n",
|
| 1091 |
+
" try:\n",
|
| 1092 |
+
" ds = load_dataset(\n",
|
| 1093 |
+
" config[\"hf_id\"], \n",
|
| 1094 |
+
" name=config[\"subset\"], \n",
|
| 1095 |
+
" split=\"train\", \n",
|
| 1096 |
+
" streaming=True, \n",
|
| 1097 |
+
" trust_remote_code=True\n",
|
| 1098 |
+
" )\n",
|
| 1099 |
+
" \n",
|
| 1100 |
+
" # 【优化】调大 buffer_size 以获得更好的随机性 (内存允许的话)\n",
|
| 1101 |
+
" ds_shuffled = ds.shuffle(buffer_size=100000, seed=42)\n",
|
| 1102 |
+
" \n",
|
| 1103 |
+
" current_bytes = 0\n",
|
| 1104 |
+
" pbar = tqdm(total=target_bytes, unit='B', unit_scale=True)\n",
|
| 1105 |
+
"\n",
|
| 1106 |
+
" with open(output_file, \"w\", encoding=\"utf-8\") as f:\n",
|
| 1107 |
+
" for sample in ds_shuffled:\n",
|
| 1108 |
+
" try:\n",
|
| 1109 |
+
" text = extract_text_from_sample(sample)\n",
|
| 1110 |
+
" \n",
|
| 1111 |
+
" # 【优化】严格过滤:如果是 None 或者太短,直接跳过\n",
|
| 1112 |
+
" if text is None or len(text) < 20: \n",
|
| 1113 |
+
" continue\n",
|
| 1114 |
+
" \n",
|
| 1115 |
+
" f.write(text + \"\\n<|endoftext|>\\n\")\n",
|
| 1116 |
+
" \n",
|
| 1117 |
+
" b_size = len(text.encode('utf-8'))\n",
|
| 1118 |
+
" current_bytes += b_size\n",
|
| 1119 |
+
" pbar.update(b_size)\n",
|
| 1120 |
+
"\n",
|
| 1121 |
+
" if current_bytes >= target_bytes:\n",
|
| 1122 |
+
" break\n",
|
| 1123 |
+
" except Exception:\n",
|
| 1124 |
+
" continue \n",
|
| 1125 |
+
" \n",
|
| 1126 |
+
" pbar.close()\n",
|
| 1127 |
+
"\n",
|
| 1128 |
+
" except Exception as e:\n",
|
| 1129 |
+
" print(f\"[错误] 处理 {config['name']} 失败: {e}\")\n",
|
| 1130 |
+
" \n",
|
| 1131 |
+
" return generated_files"
|
| 1132 |
+
]
|
| 1133 |
+
},
|
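A quick check of the per-dataset byte budgets implied by DATASET_CONFIG above, since target_bytes = ratio * TOTAL_SIZE_GB * 1024**3 (a sketch, not part of the notebook):

GB = 1024**3
for name, ratio in [("swallow_code", 0.70), ("swallow_math", 0.10),
                    ("english_fineweb", 0.15), ("chinese_skypile", 0.05)]:
    print(f"{name}: {ratio * 5 * GB / GB:.2f} GB")
# swallow_code: 3.50 GB, swallow_math: 0.50 GB,
# english_fineweb: 0.75 GB, chinese_skypile: 0.25 GB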
| 1134 |
+
{
|
| 1135 |
+
"cell_type": "code",
|
| 1136 |
+
"execution_count": 8,
|
| 1137 |
+
"id": "3d6d7bb1-7a40-4ecb-8045-b63749452d0a",
|
| 1138 |
+
"metadata": {},
|
| 1139 |
+
"outputs": [
|
| 1140 |
+
{
|
| 1141 |
+
"name": "stdout",
|
| 1142 |
+
"output_type": "stream",
|
| 1143 |
+
"text": [
|
| 1144 |
+
"正在加载分词器: /workspace/diffusionLLM/SEDDcoder_tokenizer ...\n",
|
| 1145 |
+
"\n",
|
| 1146 |
+
"📡 正在从 HuggingFace 抽取 50MB 代码数据...\n"
|
| 1147 |
+
]
|
| 1148 |
+
},
|
| 1149 |
+
{
|
| 1150 |
+
"name": "stderr",
|
| 1151 |
+
"output_type": "stream",
|
| 1152 |
+
"text": [
|
| 1153 |
+
"\n",
|
| 1154 |
+
"Sampling: 0%| | 0.00/52.4M [00:00<?, ?B/s]\u001b[A\n",
|
| 1155 |
+
"Sampling: 0%| | 6.72k/52.4M [00:00<2:08:57, 6.78kB/s]\u001b[A\n",
|
| 1156 |
+
"Sampling: 19%|█▉ | 10.0M/52.4M [00:01<00:05, 8.15MB/s] \u001b[A\n",
|
| 1157 |
+
"Sampling: 38%|███▊ | 20.0M/52.4M [00:02<00:02, 11.6MB/s]\u001b[A\n",
|
| 1158 |
+
"Sampling: 57%|█████▋ | 29.7M/52.4M [00:02<00:01, 18.8MB/s]\u001b[A\n",
|
| 1159 |
+
"Sampling: 64%|██████▍ | 33.7M/52.4M [00:02<00:01, 14.4MB/s]\u001b[A\n",
|
| 1160 |
+
"Sampling: 76%|███████▋ | 40.0M/52.4M [00:03<00:00, 14.0MB/s]\u001b[A\n",
|
| 1161 |
+
"Sampling: 52.4MB [00:03, 13.7MB/s] \u001b[A\n"
|
| 1162 |
+
]
|
| 1163 |
+
},
|
| 1164 |
+
{
|
| 1165 |
+
"name": "stdout",
|
| 1166 |
+
"output_type": "stream",
|
| 1167 |
+
"text": [
|
| 1168 |
+
"\n",
|
| 1169 |
+
"🧮 正在计算 Token 数量 (文本长度: 52390219 字符)...\n",
|
| 1170 |
+
"\n",
|
| 1171 |
+
"========================================\n",
|
| 1172 |
+
"📊 【你的 Tokenizer 效率报告】\n",
|
| 1173 |
+
"========================================\n",
|
| 1174 |
+
"采样数据: 50.00 MB\n",
|
| 1175 |
+
"Token 数量: 14303759\n",
|
| 1176 |
+
"Bytes/Token: 3.6655\n",
|
| 1177 |
+
"----------------------------------------\n",
|
| 1178 |
+
"基于你的 21.857 GB 数据总量估算:\n",
|
| 1179 |
+
"🔥 总 Token 数: \u001b[1;32m6.40 Billion (十亿)\u001b[0m\n",
|
| 1180 |
+
"----------------------------------------\n",
|
| 1181 |
+
"💡 训练建议:\n",
|
| 1182 |
+
" 你的数据量 (6.4B) 刚好或略少。\n",
|
| 1183 |
+
" ✅ 建议跑 4-8 Epochs 以充分榨干数据价值。\n",
|
| 1184 |
+
"========================================\n"
|
| 1185 |
+
]
|
| 1186 |
+
}
|
| 1187 |
+
],
|
| 1188 |
+
"source": [
|
| 1189 |
+
"import os\n",
|
| 1190 |
+
"from datasets import load_dataset\n",
|
| 1191 |
+
"from transformers import AutoTokenizer\n",
|
| 1192 |
+
"from tqdm import tqdm\n",
|
| 1193 |
+
"\n",
|
| 1194 |
+
"# ================= 配置区域 =================\n",
|
| 1195 |
+
"# 你的分词器路径 (确保文件夹名字对)\n",
|
| 1196 |
+
"TOKENIZER_PATH = \"/workspace/diffusionLLM/SEDDcoder_tokenizer\" \n",
|
| 1197 |
+
"# 目标采样大小 (50MB 足够精准了)\n",
|
| 1198 |
+
"TARGET_SIZE_MB = 50 \n",
|
| 1199 |
+
"# 使用高质量代码数据源\n",
|
| 1200 |
+
"DATASET_ID = \"tokyotech-llm/swallow-code\"\n",
|
| 1201 |
+
"SUBSET = \"exp11-scor\"\n",
|
| 1202 |
+
"\n",
|
| 1203 |
+
"def main():\n",
|
| 1204 |
+
" # 1. 加载你的分词器\n",
|
| 1205 |
+
" if not os.path.exists(TOKENIZER_PATH):\n",
|
| 1206 |
+
" print(f\"❌ 错误: 找不到分词器路径: {TOKENIZER_PATH}\")\n",
|
| 1207 |
+
" print(\"请确认你没有把分词器文件夹也一起删了。\")\n",
|
| 1208 |
+
" return\n",
|
| 1209 |
+
"\n",
|
| 1210 |
+
" print(f\"正在加载分词器: {TOKENIZER_PATH} ...\")\n",
|
| 1211 |
+
" try:\n",
|
| 1212 |
+
" tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH)\n",
|
| 1213 |
+
" except Exception as e:\n",
|
| 1214 |
+
" print(f\"❌ 分词器加载失败: {e}\")\n",
|
| 1215 |
+
" return\n",
|
| 1216 |
+
"\n",
|
| 1217 |
+
" # 2. 流式下载 50MB 数据\n",
|
| 1218 |
+
" print(f\"\\n📡 正在从 HuggingFace 抽取 {TARGET_SIZE_MB}MB 代码数据...\")\n",
|
| 1219 |
+
" try:\n",
|
| 1220 |
+
" # streaming=True 是关键,不下载全量,只读一点点\n",
|
| 1221 |
+
" ds = load_dataset(DATASET_ID, SUBSET, split=\"train\", streaming=True)\n",
|
| 1222 |
+
" \n",
|
| 1223 |
+
" collected_text = \"\"\n",
|
| 1224 |
+
" current_bytes = 0\n",
|
| 1225 |
+
" target_bytes = TARGET_SIZE_MB * 1024 * 1024\n",
|
| 1226 |
+
" \n",
|
| 1227 |
+
" pbar = tqdm(total=target_bytes, unit='B', unit_scale=True, desc=\"Sampling\")\n",
|
| 1228 |
+
" \n",
|
| 1229 |
+
" for sample in ds:\n",
|
| 1230 |
+
" # 兼容不同的列名\n",
|
| 1231 |
+
" text = sample.get('content') or sample.get('text') or sample.get('code')\n",
|
| 1232 |
+
" if not text: continue\n",
|
| 1233 |
+
" \n",
|
| 1234 |
+
" # 计算字节大小\n",
|
| 1235 |
+
" text_bytes = len(text.encode('utf-8'))\n",
|
| 1236 |
+
" \n",
|
| 1237 |
+
" collected_text += text\n",
|
| 1238 |
+
" current_bytes += text_bytes\n",
|
| 1239 |
+
" pbar.update(text_bytes)\n",
|
| 1240 |
+
" \n",
|
| 1241 |
+
" if current_bytes >= target_bytes:\n",
|
| 1242 |
+
" break\n",
|
| 1243 |
+
" pbar.close()\n",
|
| 1244 |
+
" \n",
|
| 1245 |
+
" except Exception as e:\n",
|
| 1246 |
+
" print(f\"❌ 网络错误或数据集连接失败: {e}\")\n",
|
| 1247 |
+
" return\n",
|
| 1248 |
+
"\n",
|
| 1249 |
+
" # 3. 计算压缩率\n",
|
| 1250 |
+
" print(f\"\\n🧮 正在计算 Token 数量 (文本长度: {len(collected_text)} 字符)...\")\n",
|
| 1251 |
+
" \n",
|
| 1252 |
+
" # 编码\n",
|
| 1253 |
+
" tokens = tokenizer.encode(collected_text)\n",
|
| 1254 |
+
" num_tokens = len(tokens)\n",
|
| 1255 |
+
" \n",
|
| 1256 |
+
" # 核心公式: 字节数 / Token数\n",
|
| 1257 |
+
" ratio = current_bytes / num_tokens\n",
|
| 1258 |
+
" \n",
|
| 1259 |
+
" # 4. 输出结论\n",
|
| 1260 |
+
" print(\"\\n\" + \"=\"*40)\n",
|
| 1261 |
+
" print(f\"📊 【你的 Tokenizer 效率报告】\")\n",
|
| 1262 |
+
" print(\"=\"*40)\n",
|
| 1263 |
+
" print(f\"采样数据: {current_bytes / 1024 / 1024:.2f} MB\")\n",
|
| 1264 |
+
" print(f\"Token 数量: {num_tokens}\")\n",
|
| 1265 |
+
" print(f\"Bytes/Token: {ratio:.4f}\")\n",
|
| 1266 |
+
" print(\"-\" * 40)\n",
|
| 1267 |
+
" \n",
|
| 1268 |
+
" # 5. 为你估算 21.857GB 的总 Token 数\n",
|
| 1269 |
+
" total_data_gb = 21.857\n",
|
| 1270 |
+
" total_tokens_billion = (total_data_gb * 1024**3) / ratio / 10**9\n",
|
| 1271 |
+
" \n",
|
| 1272 |
+
" print(f\"基于你的 21.857 GB 数据总量估算:\")\n",
|
| 1273 |
+
" print(f\"🔥 总 Token 数: \\033[1;32m{total_tokens_billion:.2f} Billion (十亿)\\033[0m\")\n",
|
| 1274 |
+
" \n",
|
| 1275 |
+
" print(\"-\" * 40)\n",
|
| 1276 |
+
" print(\"💡 训练建议:\")\n",
|
| 1277 |
+
" if total_tokens_billion > 8:\n",
|
| 1278 |
+
" print(f\" 你的数据量 ({total_tokens_billion:.1f}B) >> 模型需求 (8B)。\")\n",
|
| 1279 |
+
" print(\" ✅ 建议跑 1 个 Epoch 就够了 (Chinchilla Optimal)。\")\n",
|
| 1280 |
+
" print(\" 🚀 如果想强化代码能力,最多跑 2-4 Epochs。\")\n",
|
| 1281 |
+
" else:\n",
|
| 1282 |
+
" print(f\" 你的数据量 ({total_tokens_billion:.1f}B) 刚好或略少。\")\n",
|
| 1283 |
+
" print(\" ✅ 建议跑 4-8 Epochs 以充分榨干数据价值。\")\n",
|
| 1284 |
+
" print(\"=\"*40)\n",
|
| 1285 |
+
"\n",
|
| 1286 |
+
"if __name__ == \"__main__\":\n",
|
| 1287 |
+
" main()"
|
| 1288 |
+
]
|
| 1289 |
+
},
|
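Reproducing the headline estimate from the report above by hand (a sketch): at the measured 3.6655 bytes per token, 21.857 GiB of raw text works out to:

total_bytes = 21.857 * 1024**3  # 21.857 GiB
ratio = 3.6655                  # bytes per token, measured on the 50MB sample
print(f"{total_bytes / ratio / 1e9:.2f}B tokens")  # 6.40B, matching the report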
| 1290 |
+
{
|
| 1291 |
+
"cell_type": "code",
|
| 1292 |
+
"execution_count": null,
|
| 1293 |
+
"id": "09d91717-61ee-4054-8ca1-39c101086248",
|
| 1294 |
+
"metadata": {},
|
| 1295 |
+
"outputs": [],
|
| 1296 |
+
"source": []
|
| 1297 |
+
}
|
| 1298 |
+
],
|
| 1299 |
+
"metadata": {
|
| 1300 |
+
"kernelspec": {
|
| 1301 |
+
"display_name": "Python 3 (ipykernel)",
|
| 1302 |
+
"language": "python",
|
| 1303 |
+
"name": "python3"
|
| 1304 |
+
},
|
| 1305 |
+
"language_info": {
|
| 1306 |
+
"codemirror_mode": {
|
| 1307 |
+
"name": "ipython",
|
| 1308 |
+
"version": 3
|
| 1309 |
+
},
|
| 1310 |
+
"file_extension": ".py",
|
| 1311 |
+
"mimetype": "text/x-python",
|
| 1312 |
+
"name": "python",
|
| 1313 |
+
"nbconvert_exporter": "python",
|
| 1314 |
+
"pygments_lexer": "ipython3",
|
| 1315 |
+
"version": "3.10.12"
|
| 1316 |
+
}
|
| 1317 |
+
},
|
| 1318 |
+
"nbformat": 4,
|
| 1319 |
+
"nbformat_minor": 5
|
| 1320 |
+
}
|
SEDDcoder_tokenizer/vocab.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
best.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b7509aaa778d142e1e901b4e06b3a956dd8297a9dd895849c96ea2b74a8d1e8b
|
| 3 |
+
size 2688907928
|
config.yaml
ADDED
|
@@ -0,0 +1,109 @@
|
| 1 |
+
defaults:
|
| 2 |
+
- _self_
|
| 3 |
+
- /callbacks: [checkpoint_every_n_steps, checkpoint_monitor, learning_rate_monitor]
|
| 4 |
+
- /data: openwebtext
|
| 5 |
+
- /model: small
|
| 6 |
+
- /strategy: ddp
|
| 7 |
+
- /noise: loglinear
|
| 8 |
+
- /lr_scheduler: constant_warmup
|
| 9 |
+
|
| 10 |
+
mode: train # train / ppl_eval / sample_eval
|
| 11 |
+
diffusion: absorbing_state
|
| 12 |
+
backbone: dit # dit / dimamba / ar : backbone for Diffusion
|
| 13 |
+
ebm_backbone: null # dit / dimamba / ar : backbone for EBM
|
| 14 |
+
parameterization: subs # subs / d3pm / sedd
|
| 15 |
+
time_conditioning: True
|
| 16 |
+
T: 0 # 0 (continuous time) / 1000
|
| 17 |
+
subs_masking: False
|
| 18 |
+
|
| 19 |
+
seed: 1
|
| 20 |
+
|
| 21 |
+
loader:
|
| 22 |
+
global_batch_size: 512
|
| 23 |
+
eval_global_batch_size: ${.global_batch_size}
|
| 24 |
+
# Note: batch_size and eval_batch_size are **per machine**
|
| 25 |
+
batch_size: ${div_up:${.global_batch_size}, ${eval:${trainer.devices} * ${trainer.num_nodes}}}
|
| 26 |
+
eval_batch_size: ${div_up:${.eval_global_batch_size}, ${eval:${trainer.devices} * ${trainer.num_nodes}}}
|
| 27 |
+
num_workers: ${eval:"len(__import__('os').sched_getaffinity(0))"}
|
| 28 |
+
pin_memory: True
|
| 29 |
+
|
| 30 |
+
sampling:
|
| 31 |
+
predictor: ddpm_cache # analytic, ddpm, ddpm_cache
|
| 32 |
+
steps: 128
|
| 33 |
+
noise_removal: True
|
| 34 |
+
# TODO(yair): @subham, why aren't these params under `eval`?
|
| 35 |
+
num_sample_batches: 2 # Total samples: `num_gpus` * `loader.eval_batch_size` * num_sample_batches
|
| 36 |
+
num_sample_log: 2
|
| 37 |
+
semi_ar: False
|
| 38 |
+
stride_length: 1
|
| 39 |
+
num_strides: 1
|
| 40 |
+
# importance sampling
|
| 41 |
+
is_size: 2
|
| 42 |
+
is_start: 0.6
|
| 43 |
+
is_end: 0.4
|
| 44 |
+
is_temp: 1
|
| 45 |
+
# ar ebm
|
| 46 |
+
ar_carry_over: True
|
| 47 |
+
|
| 48 |
+
training:
|
| 49 |
+
ema: 0.9999
|
| 50 |
+
antithetic_sampling: True
|
| 51 |
+
importance_sampling: False
|
| 52 |
+
sampling_eps: 1e-3
|
| 53 |
+
change_of_variables: False
|
| 54 |
+
|
| 55 |
+
eval:
|
| 56 |
+
checkpoint_path: '' # Used to evaluate a checkpoint after training.
|
| 57 |
+
disable_ema: False
|
| 58 |
+
compute_generative_perplexity: False
|
| 59 |
+
perplexity_batch_size: 8
|
| 60 |
+
compute_perplexity_on_sanity: False
|
| 61 |
+
gen_ppl_eval_model_name_or_path: gpt2-large # gpt2-large, meta-llama/Llama-2-7b-hf, meta-llama/Meta-Llama-3-8B
|
| 62 |
+
generate_samples: True
|
| 63 |
+
|
| 64 |
+
optim:
|
| 65 |
+
weight_decay: 0
|
| 66 |
+
lr: 3e-4
|
| 67 |
+
beta1: 0.9
|
| 68 |
+
beta2: 0.999
|
| 69 |
+
eps: 1e-8
|
| 70 |
+
|
| 71 |
+
trainer:
|
| 72 |
+
_target_: lightning.Trainer
|
| 73 |
+
accelerator: cuda
|
| 74 |
+
num_nodes: 1
|
| 75 |
+
devices: ${device_count:}
|
| 76 |
+
accumulate_grad_batches: ${div_up:${loader.global_batch_size}, ${eval:${trainer.devices} * ${loader.batch_size} * ${trainer.num_nodes}}}
|
| 77 |
+
gradient_clip_val: 1.0
|
| 78 |
+
precision: 'bf16'
|
| 79 |
+
num_sanity_val_steps: 2
|
| 80 |
+
max_steps: 1_000_000
|
| 81 |
+
log_every_n_steps: 10
|
| 82 |
+
limit_train_batches: 1.0 # train on full dataset, can be used to toggle quick run
|
| 83 |
+
limit_val_batches: 1.0 # validate on full dataset, can be used to toggle quick run
|
| 84 |
+
val_check_interval: 10000
|
| 85 |
+
|
| 86 |
+
wandb:
|
| 87 |
+
project: text-diffusion
|
| 88 |
+
notes: Mulan for text
|
| 89 |
+
group: null
|
| 90 |
+
job_type: null
|
| 91 |
+
name: null
|
| 92 |
+
id: ${.name}_${seed}
|
| 93 |
+
tags:
|
| 94 |
+
- ${noise.type}
|
| 95 |
+
- ${data.train}
|
| 96 |
+
- ${data.valid}
|
| 97 |
+
|
| 98 |
+
hydra:
|
| 99 |
+
run:
|
| 100 |
+
dir: ./outputs/${data.train}/${now:%Y.%m.%d}/${now:%H%M%S}
|
| 101 |
+
job:
|
| 102 |
+
chdir: true
|
| 103 |
+
|
| 104 |
+
checkpointing:
|
| 105 |
+
# Use custom `save_dir` if, e.g., saving to S3 bucket, otherwise leave this parameter as is
|
| 106 |
+
save_dir: ${cwd:}
|
| 107 |
+
# Note: `checkpoints` path should correspond to `checkpoint_every_n_steps.dirpath`
|
| 108 |
+
resume_from_ckpt: true
|
| 109 |
+
resume_ckpt_path: ${.save_dir}/checkpoints/last.ckpt
|
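A sketch of the batch-size arithmetic wired into the loader and trainer sections above, assuming the div_up resolver is ceiling division (as its name and usage suggest; the device count below is hypothetical). With global_batch_size 512 on 1 node with 4 GPUs:

def div_up(a: int, b: int) -> int:
    return (a + b - 1) // b  # ceiling division

devices, num_nodes, global_batch_size = 4, 1, 512
per_device = div_up(global_batch_size, devices * num_nodes)          # 128 per GPU
accum = div_up(global_batch_size, devices * per_device * num_nodes)  # 1 grad-accumulation step
print(per_device, accum)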