huruo committed
Commit 5f91276 · verified · 1 Parent(s): 9b03ddd

Upload folder using huggingface_hub

Files changed (36)
  1. .gitattributes +1 -0
  2. s1k-1.1/full/checkpoint-225/config.json +30 -0
  3. s1k-1.1/full/checkpoint-225/generation_config.json +9 -0
  4. s1k-1.1/full/checkpoint-225/global_step225/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  5. s1k-1.1/full/checkpoint-225/global_step225/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
  6. s1k-1.1/full/checkpoint-225/global_step225/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
  7. s1k-1.1/full/checkpoint-225/global_step225/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
  8. s1k-1.1/full/checkpoint-225/global_step225/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt +3 -0
  9. s1k-1.1/full/checkpoint-225/global_step225/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt +3 -0
  10. s1k-1.1/full/checkpoint-225/global_step225/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt +3 -0
  11. s1k-1.1/full/checkpoint-225/global_step225/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt +3 -0
  12. s1k-1.1/full/checkpoint-225/global_step225/zero_pp_rank_0_mp_rank_00_model_states.pt +3 -0
  13. s1k-1.1/full/checkpoint-225/global_step225/zero_pp_rank_1_mp_rank_00_model_states.pt +3 -0
  14. s1k-1.1/full/checkpoint-225/global_step225/zero_pp_rank_2_mp_rank_00_model_states.pt +3 -0
  15. s1k-1.1/full/checkpoint-225/global_step225/zero_pp_rank_3_mp_rank_00_model_states.pt +3 -0
  16. s1k-1.1/full/checkpoint-225/global_step225/zero_pp_rank_4_mp_rank_00_model_states.pt +3 -0
  17. s1k-1.1/full/checkpoint-225/global_step225/zero_pp_rank_5_mp_rank_00_model_states.pt +3 -0
  18. s1k-1.1/full/checkpoint-225/global_step225/zero_pp_rank_6_mp_rank_00_model_states.pt +3 -0
  19. s1k-1.1/full/checkpoint-225/global_step225/zero_pp_rank_7_mp_rank_00_model_states.pt +3 -0
  20. s1k-1.1/full/checkpoint-225/latest +1 -0
  21. s1k-1.1/full/checkpoint-225/model.safetensors +3 -0
  22. s1k-1.1/full/checkpoint-225/rng_state_0.pth +3 -0
  23. s1k-1.1/full/checkpoint-225/rng_state_1.pth +3 -0
  24. s1k-1.1/full/checkpoint-225/rng_state_2.pth +3 -0
  25. s1k-1.1/full/checkpoint-225/rng_state_3.pth +3 -0
  26. s1k-1.1/full/checkpoint-225/rng_state_4.pth +3 -0
  27. s1k-1.1/full/checkpoint-225/rng_state_5.pth +3 -0
  28. s1k-1.1/full/checkpoint-225/rng_state_6.pth +3 -0
  29. s1k-1.1/full/checkpoint-225/rng_state_7.pth +3 -0
  30. s1k-1.1/full/checkpoint-225/scheduler.pt +3 -0
  31. s1k-1.1/full/checkpoint-225/special_tokens_map.json +32 -0
  32. s1k-1.1/full/checkpoint-225/tokenizer.json +3 -0
  33. s1k-1.1/full/checkpoint-225/tokenizer_config.json +207 -0
  34. s1k-1.1/full/checkpoint-225/trainer_state.json +1608 -0
  35. s1k-1.1/full/checkpoint-225/training_args.bin +3 -0
  36. s1k-1.1/full/checkpoint-225/zero_to_fp32.py +674 -0
.gitattributes CHANGED
@@ -38,3 +38,4 @@ limo/full/checkpoint-1030/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  limo/full/checkpoint-1545/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  s1k-1.1/full/checkpoint-75/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  s1k-1.1/full/checkpoint-150/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ s1k-1.1/full/checkpoint-225/tokenizer.json filter=lfs diff=lfs merge=lfs -text
s1k-1.1/full/checkpoint-225/config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "/mnt/public/data/lh/models/DeepSeek-R1-Distill-Qwen-1.5B",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151643,
+   "hidden_act": "silu",
+   "hidden_size": 1536,
+   "initializer_range": 0.02,
+   "intermediate_size": 8960,
+   "max_position_embeddings": 131072,
+   "max_window_layers": 21,
+   "model_type": "qwen2",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 2,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.46.1",
+   "use_cache": false,
+   "use_mrope": false,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
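The config matches the DeepSeek-R1-Distill-Qwen-1.5B base this run was fine-tuned from. As a sanity check, the parameter count implied by these fields lines up with the `model.safetensors` size recorded later in this commit (a rough sketch; head_dim = 1536 / 12 = 128 is assumed from Qwen2 defaults, since config.json does not list it):

```python
# Rough parameter count from the config fields above.
h, inter, layers, vocab = 1536, 8960, 28, 151936
heads, kv_heads, head_dim = 12, 2, 128  # head_dim assumed, not in config.json

attn = (heads + 2 * kv_heads) * head_dim * h + heads * head_dim * h  # q/k/v + o projections
attn += (heads + 2 * kv_heads) * head_dim                            # q/k/v biases (Qwen2 uses them)
mlp = 3 * h * inter                                                  # gate, up, down
per_layer = attn + mlp + 2 * h                                       # plus two RMSNorm weights
total = layers * per_layer + 2 * vocab * h + h                       # untied embeddings + lm_head + final norm
print(f"~{total / 1e9:.2f}B params")        # ≈ 1.78B
print(f"~{3554214752 / 2 / 1e9:.2f}B")      # model.safetensors bytes / 2 (bf16) ≈ 1.78B
```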
s1k-1.1/full/checkpoint-225/generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 151646,
+   "do_sample": true,
+   "eos_token_id": 151643,
+   "temperature": 0.6,
+   "top_p": 0.95,
+   "transformers_version": "4.46.1"
+ }
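For reference, a minimal sketch of loading this checkpoint with transformers and sampling with the settings recorded above; the local checkpoint path is illustrative:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "s1k-1.1/full/checkpoint-225"  # illustrative; any copy of this checkpoint dir works
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16, device_map="auto")

inputs = tokenizer("Prove that sqrt(2) is irrational.", return_tensors="pt").to(model.device)
# do_sample / temperature / top_p mirror generation_config.json above.
out = model.generate(**inputs, max_new_tokens=512, do_sample=True, temperature=0.6, top_p=0.95)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```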
s1k-1.1/full/checkpoint-225/global_step225/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:722b59518703b26027322f1ece90274a9b8ea3438595be8e28d52704dbd46c10
+ size 2665636656
s1k-1.1/full/checkpoint-225/global_step225/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a01146c9649b4ae679de89928c4eefffac5f86b8daef49266f91e7026b032aec
+ size 2665636656
s1k-1.1/full/checkpoint-225/global_step225/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:abc81616062d9a803f9a2700f7a3bd44ef64bc3a5d893035f8b151d7f2255ca4
+ size 2665636656
s1k-1.1/full/checkpoint-225/global_step225/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95baef54d5b319df9beb10358037673f5f81cf15e8f6f6d5475451640fafe93a
+ size 2665636656
s1k-1.1/full/checkpoint-225/global_step225/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d99d386ae91463be483a7fb4e330f906e08b6a39274eb898495576252eec9a9d
+ size 2665636656
s1k-1.1/full/checkpoint-225/global_step225/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea2f1f2b4b14240db891543cb722f6c8c7afd7e5f9b68e07e35a48a1a7b4e0b0
+ size 2665636656
s1k-1.1/full/checkpoint-225/global_step225/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22624d97d1265852e023bf09303f2f621bac7f64114ba78eb8e3d80f80009f32
+ size 2665636656
s1k-1.1/full/checkpoint-225/global_step225/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ae4f1021be6e3e455d73fe27ec4b3f60f96888c4840c6acf802fba14c80cbe6
+ size 2665636656
s1k-1.1/full/checkpoint-225/global_step225/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb15bcd1c9b1afd2e0f745959fa8aa8e71dda0275d04d5965a6eb5cdeaf953e2
+ size 168405
s1k-1.1/full/checkpoint-225/global_step225/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:830b6b75d6461408a503fefde50d0ec9473a212daacb818c0669ec7ebcdf59cb
+ size 168405
s1k-1.1/full/checkpoint-225/global_step225/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0414b924ea4d866f19475c27a987bd3032ef66eb60b331897cc1e2af1922b4e2
+ size 168405
s1k-1.1/full/checkpoint-225/global_step225/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a8656fe6b4564422b4acc3b3e19d1828a24fd2f5e8b2a00ed66f137288348fa
+ size 168405
s1k-1.1/full/checkpoint-225/global_step225/zero_pp_rank_4_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0de8a03d2d0abc6601602492a055da3a26029bee99f40bef9af668651ee15ec
+ size 168405
s1k-1.1/full/checkpoint-225/global_step225/zero_pp_rank_5_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa930234ec57f829c16bcc9aaf35b69687e62b39dada817b4f9c7c52e1ea23be
+ size 168405
s1k-1.1/full/checkpoint-225/global_step225/zero_pp_rank_6_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d43c20dfe2ba7b959d176ab94fe5185add47aa507964b2a01375352527ba476
+ size 168405
s1k-1.1/full/checkpoint-225/global_step225/zero_pp_rank_7_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2746163875458e7ca84c20471d1853cf54af86c83caaca591368dc1b46fa2e3
+ size 168405
s1k-1.1/full/checkpoint-225/latest ADDED
@@ -0,0 +1 @@
+ global_step225
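`latest` holds the DeepSpeed tag (`global_step225`) that the bundled `zero_to_fp32.py` reads when consolidating the eight ZeRO shard files above into a single fp32 state dict. A minimal sketch using DeepSpeed's library entry point, which implements the same logic (assumes `deepspeed` is installed and the checkpoint directory is local):

```python
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# Consolidate the bf16 ZeRO optimizer/model shards under global_step225/
# into one fp32 state dict. The directory is the checkpoint root that
# contains `latest`; the tag is read from `latest` when omitted.
state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "s1k-1.1/full/checkpoint-225", tag="global_step225"
)
torch.save(state_dict, "pytorch_model_fp32.bin")
```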
s1k-1.1/full/checkpoint-225/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7d77f9ab393e40b24fa4207d69449526fea35af8e9eff95aee7e7b50dd6eecf
+ size 3554214752
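These three-line files are Git LFS pointers: the repository stores only the payload's sha256 and byte size. A small sketch for verifying a downloaded file against its pointer (local filename is illustrative; oid and size are copied from the `model.safetensors` pointer above):

```python
import hashlib

expected_oid = "c7d77f9ab393e40b24fa4207d69449526fea35af8e9eff95aee7e7b50dd6eecf"
expected_size = 3_554_214_752

# Stream the file in 1 MiB chunks so the 3.5 GB payload never sits in memory.
digest, size = hashlib.sha256(), 0
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
        size += len(chunk)

assert size == expected_size, f"size mismatch: {size}"
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("LFS pointer verified")
```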
s1k-1.1/full/checkpoint-225/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07d994b317c4df888a1a1aabc0c532e81f1fa34c18c8313cb2feadca3bb37194
+ size 15984
s1k-1.1/full/checkpoint-225/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f5b05860618aa49c7f5d8c366d6ee73cf8b3b0d0adc17d9313b72621630d0aa
+ size 15984
s1k-1.1/full/checkpoint-225/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7262faf861e984775b4fd85bc76a11b0b8b04037690e8a08a58cf9ff5328a042
+ size 15984
s1k-1.1/full/checkpoint-225/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9479cad91150e2e266d17eb95fe678579a770f6df6b53496cf72067b186b094d
+ size 15984
s1k-1.1/full/checkpoint-225/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:435cb6cf559e0ce3fe0d4582cac16ea40b48b7a64589952402a4c399cafbfc00
+ size 15984
s1k-1.1/full/checkpoint-225/rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f51001b0d8dc5792180c3a9705ccbfa66b61d46d7639afb6f7abf409629ed74f
+ size 15984
s1k-1.1/full/checkpoint-225/rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1e87084f11088fdce293e1fbbb05e35f5c7385b00e2f9ba195bf61cb36f757d
+ size 15984
s1k-1.1/full/checkpoint-225/rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d32e9bdd65145ae509e6c6ef4f6ea9d842f94a34c34a0d7d2ab6c248d3f2121
+ size 15984
s1k-1.1/full/checkpoint-225/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:736fd5de2fc57872d8f115e475efd2d8ab0cd7c18acc1cd716b760a78e0c760a
+ size 1064
s1k-1.1/full/checkpoint-225/special_tokens_map.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "additional_special_tokens": [
+     {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     }
+   ],
+   "bos_token": {
+     "content": "<|begin▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|end▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|end▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
s1k-1.1/full/checkpoint-225/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02643f00207dfc5ed248992486bde04314c21dca556bf65ce520690962b8db63
+ size 11422965
s1k-1.1/full/checkpoint-225/tokenizer_config.json ADDED
@@ -0,0 +1,207 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "151643": {"content": "<|end▁of▁sentence|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151644": {"content": "<|User|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151645": {"content": "<|Assistant|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151646": {"content": "<|begin▁of▁sentence|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151647": {"content": "<|EOT|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151648": {"content": "<think>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151649": {"content": "</think>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151650": {"content": "<|quad_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151651": {"content": "<|quad_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151652": {"content": "<|vision_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151653": {"content": "<|vision_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151654": {"content": "<|vision_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151655": {"content": "<|image_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151656": {"content": "<|video_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+     "151657": {"content": "<tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151658": {"content": "</tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151659": {"content": "<|fim_prefix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151660": {"content": "<|fim_middle|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151661": {"content": "<|fim_suffix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151662": {"content": "<|fim_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151663": {"content": "<|repo_name|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151664": {"content": "<|file_sep|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
+     "151665": {"content": "<|im_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}
+   },
+   "additional_special_tokens": [
+     "<|im_end|>"
+   ],
+   "bos_token": "<|begin▁of▁sentence|>",
+   "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|end▁of▁sentence|>",
+   "legacy": true,
+   "model_max_length": 20000,
+   "pad_token": "<|end▁of▁sentence|>",
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "split_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": null,
+   "use_default_system_prompt": false
+ }
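The chat_template above wraps user turns in `<|User|>`, strips everything up to `</think>` from stored assistant turns, and ends the prompt with `<|Assistant|><think>\n` when `add_generation_prompt` is set, so the model begins each reply by reasoning. A minimal sketch of rendering it (checkpoint path is illustrative):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("s1k-1.1/full/checkpoint-225")
messages = [
    {"role": "system", "content": "You are a careful mathematician."},
    {"role": "user", "content": "Factor x^2 - 5x + 6."},
]
# Renders <bos> + system prompt + <|User|>... and, because of
# add_generation_prompt=True, a trailing <|Assistant|><think>\n cue.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```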
s1k-1.1/full/checkpoint-225/trainer_state.json ADDED
@@ -0,0 +1,1608 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 14.4,
+   "eval_steps": 500,
+   "global_step": 225,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.064, "grad_norm": 1.5593382120132446, "learning_rate": 9.999512620046523e-06, "loss": 0.9965, "step": 1},
+     {"epoch": 0.128, "grad_norm": 1.223217487335205, "learning_rate": 9.998050575201772e-06, "loss": 0.9668, "step": 2},
+     {"epoch": 0.192, "grad_norm": 1.0882679224014282, "learning_rate": 9.995614150494293e-06, "loss": 0.9414, "step": 3},
+     {"epoch": 0.256, "grad_norm": 1.0166572332382202, "learning_rate": 9.992203820909906e-06, "loss": 1.0033, "step": 4},
+     {"epoch": 0.32, "grad_norm": 0.5519472360610962, "learning_rate": 9.987820251299121e-06, "loss": 0.9555, "step": 5},
+     {"epoch": 0.384, "grad_norm": 0.39769354462623596, "learning_rate": 9.982464296247523e-06, "loss": 0.9841, "step": 6},
+     {"epoch": 0.448, "grad_norm": 0.3815975487232208, "learning_rate": 9.976136999909156e-06, "loss": 0.9409, "step": 7},
+     {"epoch": 0.512, "grad_norm": 0.792307436466217, "learning_rate": 9.968839595802982e-06, "loss": 0.9572, "step": 8},
+     {"epoch": 0.576, "grad_norm": 0.8832745552062988, "learning_rate": 9.960573506572391e-06, "loss": 0.9821, "step": 9},
+     {"epoch": 0.64, "grad_norm": 0.8655721545219421, "learning_rate": 9.951340343707852e-06, "loss": 0.9556, "step": 10},
+     {"epoch": 0.704, "grad_norm": 0.7657322287559509, "learning_rate": 9.941141907232766e-06, "loss": 0.8964, "step": 11},
+     {"epoch": 0.768, "grad_norm": 0.6843923330307007, "learning_rate": 9.929980185352525e-06, "loss": 0.9246, "step": 12},
+     {"epoch": 0.832, "grad_norm": 0.5020281076431274, "learning_rate": 9.91785735406693e-06, "loss": 0.8841, "step": 13},
+     {"epoch": 0.896, "grad_norm": 0.3772108852863312, "learning_rate": 9.904775776745959e-06, "loss": 0.8916, "step": 14},
+     {"epoch": 0.96, "grad_norm": 0.3307054042816162, "learning_rate": 9.890738003669029e-06, "loss": 0.9113, "step": 15},
+     {"epoch": 1.024, "grad_norm": 7.4732513427734375, "learning_rate": 9.875746771527817e-06, "loss": 1.1901, "step": 16},
+     {"epoch": 1.088, "grad_norm": 0.5649796724319458, "learning_rate": 9.859805002892733e-06, "loss": 0.9004, "step": 17},
+     {"epoch": 1.152, "grad_norm": 0.48279643058776855, "learning_rate": 9.842915805643156e-06, "loss": 0.9656, "step": 18},
+     {"epoch": 1.216, "grad_norm": 0.386542409658432, "learning_rate": 9.825082472361558e-06, "loss": 0.8151, "step": 19},
+     {"epoch": 1.28, "grad_norm": 0.45578762888908386, "learning_rate": 9.806308479691595e-06, "loss": 1.01, "step": 20},
+     {"epoch": 1.3439999999999999, "grad_norm": 0.42114177346229553, "learning_rate": 9.786597487660336e-06, "loss": 0.8718, "step": 21},
+     {"epoch": 1.408, "grad_norm": 0.3763532340526581, "learning_rate": 9.765953338964736e-06, "loss": 0.9343, "step": 22},
+     {"epoch": 1.472, "grad_norm": 0.29271769523620605, "learning_rate": 9.744380058222483e-06, "loss": 0.8225, "step": 23},
+     {"epoch": 1.536, "grad_norm": 0.2854185402393341, "learning_rate": 9.721881851187406e-06, "loss": 0.9583, "step": 24},
+     {"epoch": 1.6, "grad_norm": 0.19865305721759796, "learning_rate": 9.698463103929542e-06, "loss": 0.8836, "step": 25},
+     {"epoch": 1.6640000000000001, "grad_norm": 0.19694137573242188, "learning_rate": 9.674128381980073e-06, "loss": 0.9426, "step": 26},
+     {"epoch": 1.728, "grad_norm": 0.18151699006557465, "learning_rate": 9.648882429441258e-06, "loss": 0.9363, "step": 27},
+     {"epoch": 1.792, "grad_norm": 0.19295278191566467, "learning_rate": 9.622730168061568e-06, "loss": 0.8627, "step": 28},
+     {"epoch": 1.8559999999999999, "grad_norm": 0.25808241963386536, "learning_rate": 9.595676696276173e-06, "loss": 1.0116, "step": 29},
+     {"epoch": 1.92, "grad_norm": 0.22762736678123474, "learning_rate": 9.567727288213005e-06, "loss": 0.8429, "step": 30},
+     {"epoch": 1.984, "grad_norm": 0.29724806547164917, "learning_rate": 9.538887392664544e-06, "loss": 1.0651, "step": 31},
+     {"epoch": 2.048, "grad_norm": 0.3138919472694397, "learning_rate": 9.50916263202557e-06, "loss": 1.1384, "step": 32},
+     {"epoch": 2.112, "grad_norm": 0.19795119762420654, "learning_rate": 9.478558801197065e-06, "loss": 0.865, "step": 33},
+     {"epoch": 2.176, "grad_norm": 0.21303512156009674, "learning_rate": 9.44708186645649e-06, "loss": 0.9073, "step": 34},
+     {"epoch": 2.24, "grad_norm": 0.1803194284439087, "learning_rate": 9.414737964294636e-06, "loss": 0.887, "step": 35},
+     {"epoch": 2.304, "grad_norm": 0.15727847814559937, "learning_rate": 9.381533400219319e-06, "loss": 0.8712, "step": 36},
+     {"epoch": 2.368, "grad_norm": 0.15251287817955017, "learning_rate": 9.347474647526095e-06, "loss": 0.8583, "step": 37},
+     {"epoch": 2.432, "grad_norm": 0.1633308231830597, "learning_rate": 9.312568346036288e-06, "loss": 0.9924, "step": 38},
+     {"epoch": 2.496, "grad_norm": 0.18498282134532928, "learning_rate": 9.276821300802535e-06, "loss": 0.9926, "step": 39},
+     {"epoch": 2.56, "grad_norm": 0.16592994332313538, "learning_rate": 9.24024048078213e-06, "loss": 0.841, "step": 40},
+     {"epoch": 2.624, "grad_norm": 0.18246057629585266, "learning_rate": 9.202833017478421e-06, "loss": 0.8179, "step": 41},
+     {"epoch": 2.6879999999999997, "grad_norm": 0.1779918223619461, "learning_rate": 9.164606203550498e-06, "loss": 0.9315, "step": 42},
+     {"epoch": 2.752, "grad_norm": 0.15593133866786957, "learning_rate": 9.125567491391476e-06, "loss": 0.804, "step": 43},
+     {"epoch": 2.816, "grad_norm": 0.1553468406200409, "learning_rate": 9.085724491675642e-06, "loss": 0.9364, "step": 44},
+     {"epoch": 2.88, "grad_norm": 0.17488640546798706, "learning_rate": 9.045084971874738e-06, "loss": 0.876, "step": 45},
+     {"epoch": 2.944, "grad_norm": 0.16460111737251282, "learning_rate": 9.003656854743667e-06, "loss": 0.9124, "step": 46},
+     {"epoch": 3.008, "grad_norm": 0.19362136721611023, "learning_rate": 8.961448216775955e-06, "loss": 1.1932, "step": 47},
+     {"epoch": 3.072, "grad_norm": 0.15131668746471405, "learning_rate": 8.9184672866292e-06, "loss": 0.8872, "step": 48},
+     {"epoch": 3.136, "grad_norm": 0.14356467127799988, "learning_rate": 8.874722443520898e-06, "loss": 0.9082, "step": 49},
+     {"epoch": 3.2, "grad_norm": 0.12759928405284882, "learning_rate": 8.83022221559489e-06, "loss": 0.8845, "step": 50},
+     {"epoch": 3.2640000000000002, "grad_norm": 0.15475597977638245, "learning_rate": 8.784975278258783e-06, "loss": 0.9539, "step": 51},
+     {"epoch": 3.328, "grad_norm": 0.13172124326229095, "learning_rate": 8.73899045249266e-06, "loss": 0.8267, "step": 52},
+     {"epoch": 3.392, "grad_norm": 0.1589364856481552, "learning_rate": 8.692276703129421e-06, "loss": 0.8551, "step": 53},
+     {"epoch": 3.456, "grad_norm": 0.1318974792957306, "learning_rate": 8.644843137107058e-06, "loss": 0.8704, "step": 54},
+     {"epoch": 3.52, "grad_norm": 0.1344061940908432, "learning_rate": 8.596699001693257e-06, "loss": 0.8843, "step": 55},
+     {"epoch": 3.584, "grad_norm": 0.14365153014659882, "learning_rate": 8.547853682682605e-06, "loss": 0.8233, "step": 56},
+     {"epoch": 3.648, "grad_norm": 0.12842649221420288, "learning_rate": 8.498316702566828e-06, "loss": 0.8805, "step": 57},
+     {"epoch": 3.7119999999999997, "grad_norm": 0.12912048399448395, "learning_rate": 8.44809771867835e-06, "loss": 0.8576, "step": 58},
+     {"epoch": 3.776, "grad_norm": 0.12904053926467896, "learning_rate": 8.397206521307584e-06, "loss": 0.8536, "step": 59},
+     {"epoch": 3.84, "grad_norm": 0.14415085315704346, "learning_rate": 8.345653031794292e-06, "loss": 0.9647, "step": 60},
+     {"epoch": 3.904, "grad_norm": 0.12525947391986847, "learning_rate": 8.293447300593402e-06, "loss": 0.8804, "step": 61},
+     {"epoch": 3.968, "grad_norm": 0.13248352706432343, "learning_rate": 8.240599505315656e-06, "loss": 0.9382, "step": 62},
+     {"epoch": 4.032, "grad_norm": 0.17219072580337524, "learning_rate": 8.18711994874345e-06, "loss": 1.1208, "step": 63},
+     {"epoch": 4.096, "grad_norm": 0.12371776252985, "learning_rate": 8.133019056822303e-06, "loss": 0.8632, "step": 64},
+     {"epoch": 4.16, "grad_norm": 0.12638618052005768, "learning_rate": 8.078307376628292e-06, "loss": 0.9224, "step": 65},
+     {"epoch": 4.224, "grad_norm": 0.12182217836380005, "learning_rate": 8.022995574311876e-06, "loss": 0.8605, "step": 66},
+     {"epoch": 4.288, "grad_norm": 0.13366594910621643, "learning_rate": 7.967094433018508e-06, "loss": 0.8479, "step": 67},
+     {"epoch": 4.352, "grad_norm": 0.13507261872291565, "learning_rate": 7.910614850786448e-06, "loss": 0.8565, "step": 68},
+     {"epoch": 4.416, "grad_norm": 0.1269528865814209, "learning_rate": 7.85356783842216e-06, "loss": 0.8956, "step": 69},
+     {"epoch": 4.48, "grad_norm": 0.11027321964502335, "learning_rate": 7.795964517353734e-06, "loss": 0.7397, "step": 70},
+     {"epoch": 4.5440000000000005, "grad_norm": 0.1362013965845108, "learning_rate": 7.737816117462752e-06, "loss": 0.922, "step": 71},
+     {"epoch": 4.608, "grad_norm": 0.12494367361068726, "learning_rate": 7.679133974894984e-06, "loss": 0.7995, "step": 72},
+     {"epoch": 4.672, "grad_norm": 0.12939389050006866, "learning_rate": 7.619929529850397e-06, "loss": 0.8423, "step": 73},
+     {"epoch": 4.736, "grad_norm": 0.1359853446483612, "learning_rate": 7.560214324352858e-06, "loss": 0.9657, "step": 74},
+     {"epoch": 4.8, "grad_norm": 0.12302935123443604, "learning_rate": 7.500000000000001e-06, "loss": 0.8209, "step": 75},
+     {"epoch": 4.864, "grad_norm": 0.11996591091156006, "learning_rate": 7.4392982956936644e-06, "loss": 0.7744, "step": 76},
+     {"epoch": 4.928, "grad_norm": 0.13304197788238525, "learning_rate": 7.378121045351378e-06, "loss": 0.9597, "step": 77},
+     {"epoch": 4.992, "grad_norm": 0.1741103231906891, "learning_rate": 7.31648017559931e-06, "loss": 1.1339, "step": 78},
+     {"epoch": 5.056, "grad_norm": 0.1462891697883606, "learning_rate": 7.254387703447154e-06, "loss": 0.9125, "step": 79},
+     {"epoch": 5.12, "grad_norm": 0.11839975416660309, "learning_rate": 7.191855733945388e-06, "loss": 0.9398, "step": 80},
+     {"epoch": 5.184, "grad_norm": 0.1265653818845749, "learning_rate": 7.128896457825364e-06, "loss": 0.8048, "step": 81},
+     {"epoch": 5.248, "grad_norm": 0.12180934846401215, "learning_rate": 7.06552214912271e-06, "loss": 0.7876, "step": 82},
+     {"epoch": 5.312, "grad_norm": 0.13135825097560883, "learning_rate": 7.0017451627844765e-06, "loss": 0.8986, "step": 83},
+     {"epoch": 5.376, "grad_norm": 0.12408099323511124, "learning_rate": 6.9375779322605154e-06, "loss": 0.8158, "step": 84},
+     {"epoch": 5.44, "grad_norm": 0.12082885205745697, "learning_rate": 6.873032967079562e-06, "loss": 0.9035, "step": 85},
+     {"epoch": 5.504, "grad_norm": 0.12515701353549957, "learning_rate": 6.808122850410461e-06, "loss": 0.8674, "step": 86},
+     {"epoch": 5.568, "grad_norm": 0.12426020950078964, "learning_rate": 6.7428602366090764e-06, "loss": 0.7893, "step": 87},
+     {"epoch": 5.632, "grad_norm": 0.12863430380821228, "learning_rate": 6.677257848751276e-06, "loss": 0.8792, "step": 88},
+     {"epoch": 5.696, "grad_norm": 0.1337299644947052, "learning_rate": 6.611328476152557e-06, "loss": 0.8779, "step": 89},
+     {"epoch": 5.76, "grad_norm": 0.13116392493247986, "learning_rate": 6.545084971874738e-06, "loss": 0.8662, "step": 90},
+     {"epoch": 5.824, "grad_norm": 0.12727566063404083, "learning_rate": 6.4785402502202345e-06, "loss": 0.8901, "step": 91},
+     {"epoch": 5.888, "grad_norm": 0.12160185724496841, "learning_rate": 6.411707284214384e-06, "loss": 0.8608, "step": 92},
+     {"epoch": 5.952, "grad_norm": 0.1284012645483017, "learning_rate": 6.344599103076329e-06, "loss": 0.8304, "step": 93},
+     {"epoch": 6.016, "grad_norm": 0.19524367153644562, "learning_rate": 6.277228789678953e-06, "loss": 1.2065, "step": 94},
+     {"epoch": 6.08, "grad_norm": 0.11789774894714355, "learning_rate": 6.209609477998339e-06, "loss": 0.77, "step": 95},
+     {"epoch": 6.144, "grad_norm": 0.12179970741271973, "learning_rate": 6.141754350553279e-06, "loss": 0.9022, "step": 96},
+     {"epoch": 6.208, "grad_norm": 0.12981604039669037, "learning_rate": 6.073676635835317e-06, "loss": 0.8849, "step": 97},
+     {"epoch": 6.272, "grad_norm": 0.13244786858558655, "learning_rate": 6.005389605729824e-06, "loss": 0.789, "step": 98},
+     {"epoch": 6.336, "grad_norm": 0.12296921759843826, "learning_rate": 5.936906572928625e-06, "loss": 0.8968, "step": 99},
+     {"epoch": 6.4, "grad_norm": 0.1255040466785431, "learning_rate": 5.8682408883346535e-06, "loss": 0.8117, "step": 100},
+     {"epoch": 6.464, "grad_norm": 0.13155046105384827, "learning_rate": 5.799405938459175e-06, "loss": 0.92, "step": 101},
+     {"epoch": 6.5280000000000005, "grad_norm": 0.12069229036569595, "learning_rate": 5.730415142812059e-06, "loss": 0.8371, "step": 102},
+     {"epoch": 6.592, "grad_norm": 0.12130100280046463, "learning_rate": 5.661281951285613e-06, "loss": 0.8401, "step": 103},
+     {"epoch": 6.656, "grad_norm": 0.12238356471061707, "learning_rate": 5.592019841532507e-06, "loss": 0.838, "step": 104},
+     {"epoch": 6.72, "grad_norm": 0.13303394615650177, "learning_rate": 5.522642316338268e-06, "loss": 0.9041, "step": 105},
+     {"epoch": 6.784, "grad_norm": 0.1444772183895111, "learning_rate": 5.453162900988902e-06, "loss": 0.9033, "step": 106},
+     {"epoch": 6.848, "grad_norm": 0.12250606715679169, "learning_rate": 5.383595140634093e-06, "loss": 0.7294, "step": 107},
+     {"epoch": 6.912, "grad_norm": 0.12612612545490265, "learning_rate": 5.3139525976465675e-06, "loss": 0.8332, "step": 108},
+     {"epoch": 6.976, "grad_norm": 0.14953787624835968, "learning_rate": 5.244248848978067e-06, "loss": 0.9709, "step": 109},
+     {"epoch": 7.04, "grad_norm": 0.14709977805614471, "learning_rate": 5.174497483512506e-06, "loss": 0.9743, "step": 110},
+     {"epoch": 7.104, "grad_norm": 0.12281537801027298, "learning_rate": 5.1047120994167855e-06, "loss": 0.8208, "step": 111},
+     {"epoch": 7.168, "grad_norm": 0.13773250579833984, "learning_rate": 5.034906301489808e-06, "loss": 0.8659, "step": 112},
+     {"epoch": 7.232, "grad_norm": 0.12388751655817032, "learning_rate": 4.965093698510192e-06, "loss": 0.8874, "step": 113},
+     {"epoch": 7.296, "grad_norm": 0.1328098624944687, "learning_rate": 4.895287900583216e-06, "loss": 0.8538, "step": 114},
+     {"epoch": 7.36, "grad_norm": 0.1243242472410202, "learning_rate": 4.825502516487497e-06, "loss": 0.7639, "step": 115},
+     {"epoch": 7.424, "grad_norm": 0.12820030748844147, "learning_rate": 4.755751151021934e-06, "loss": 0.891, "step": 116},
+     {"epoch": 7.4879999999999995, "grad_norm": 0.12143447995185852, "learning_rate": 4.686047402353433e-06, "loss": 0.8208, "step": 117},
+     {"epoch": 7.552, "grad_norm": 0.12033109366893768, "learning_rate": 4.6164048593659076e-06, "loss": 0.7866, "step": 118},
+     {"epoch": 7.616, "grad_norm": 0.11808149516582489, "learning_rate": 4.546837099011101e-06, "loss": 0.8194, "step": 119},
+     {"epoch": 7.68, "grad_norm": 0.13013483583927155, "learning_rate": 4.477357683661734e-06, "loss": 0.9189, "step": 120},
+     {"epoch": 7.744, "grad_norm": 0.12187657505273819, "learning_rate": 4.4079801584674955e-06, "loss": 0.8126, "step": 121},
+     {"epoch": 7.808, "grad_norm": 0.12875136733055115, "learning_rate": 4.3387180487143875e-06, "loss": 0.92, "step": 122},
+     {"epoch": 7.872, "grad_norm": 0.12279701232910156, "learning_rate": 4.269584857187942e-06, "loss": 0.7819, "step": 123},
+     {"epoch": 7.936, "grad_norm": 0.12862621247768402, "learning_rate": 4.200594061540827e-06, "loss": 0.8319, "step": 124},
+     {"epoch": 8.0, "grad_norm": 0.19300128519535065, "learning_rate": 4.131759111665349e-06, "loss": 1.1825, "step": 125},
+     {"epoch": 8.064, "grad_norm": 0.11853662878274918, "learning_rate": 4.063093427071376e-06, "loss": 0.8276, "step": 126},
+     {"epoch": 8.128, "grad_norm": 0.12296923995018005, "learning_rate": 3.994610394270178e-06, "loss": 0.8654, "step": 127},
+     {"epoch": 8.192, "grad_norm": 0.12605859339237213, "learning_rate": 3.926323364164684e-06, "loss": 0.8355, "step": 128},
+     {"epoch": 8.256, "grad_norm": 0.11515746265649796, "learning_rate": 3.8582456494467214e-06, "loss": 0.8135, "step": 129},
+     {"epoch": 8.32, "grad_norm": 0.12183663994073868, "learning_rate": 3.790390522001662e-06, "loss": 0.8603, "step": 130},
+     {"epoch": 8.384, "grad_norm": 0.12571197748184204, "learning_rate": 3.7227712103210485e-06, "loss": 0.8231, "step": 131},
+     {"epoch": 8.448, "grad_norm": 0.12782686948776245, "learning_rate": 3.655400896923672e-06, "loss": 0.8856, "step": 132},
+     {"epoch": 8.512, "grad_norm": 0.12933164834976196, "learning_rate": 3.5882927157856175e-06, "loss": 0.8418, "step": 133},
+     {"epoch": 8.576, "grad_norm": 0.1267186850309372, "learning_rate": 3.521459749779769e-06, "loss": 0.7858, "step": 134},
+     {"epoch": 8.64, "grad_norm": 0.13652652502059937, "learning_rate": 3.4549150281252635e-06, "loss": 0.7915, "step": 135},
+     {"epoch": 8.704, "grad_norm": 0.13402396440505981, "learning_rate": 3.3886715238474454e-06, "loss": 0.8035, "step": 136},
+     {"epoch": 8.768, "grad_norm": 0.12407074123620987, "learning_rate": 3.322742151248726e-06, "loss": 0.8221, "step": 137},
+     {"epoch": 8.832, "grad_norm": 0.12156077474355698, "learning_rate": 3.2571397633909252e-06, "loss": 0.8522, "step": 138},
+     {"epoch": 8.896, "grad_norm": 0.12711839377880096, "learning_rate": 3.1918771495895395e-06, "loss": 0.8802, "step": 139},
+     {"epoch": 8.96, "grad_norm": 0.1299351155757904, "learning_rate": 3.12696703292044e-06, "loss": 0.8643, "step": 140},
+     {"epoch": 9.024, "grad_norm": 0.17408832907676697, "learning_rate": 3.0624220677394854e-06, "loss": 1.0836, "step": 141},
+     {"epoch": 9.088, "grad_norm": 0.1219247579574585, "learning_rate": 2.9982548372155264e-06, "loss": 0.9013, "step": 142},
+     {"epoch": 9.152, "grad_norm": 0.12802451848983765, "learning_rate": 2.934477850877292e-06, "loss": 0.8827, "step": 143},
+     {"epoch": 9.216, "grad_norm": 0.11756030470132828, "learning_rate": 2.871103542174637e-06, "loss": 0.7977, "step": 144},
+     {"epoch": 9.28, "grad_norm": 0.1230553537607193, "learning_rate": 2.8081442660546126e-06, "loss": 0.8603, "step": 145},
+     {"epoch": 9.344, "grad_norm": 0.12843072414398193, "learning_rate": 2.7456122965528475e-06, "loss": 0.7889, "step": 146},
+     {"epoch": 9.408, "grad_norm": 0.13244116306304932, "learning_rate": 2.683519824400693e-06, "loss": 0.8419, "step": 147},
+     {"epoch": 9.472, "grad_norm": 0.1266927719116211, "learning_rate": 2.6218789546486235e-06, "loss": 0.8097, "step": 148},
+     {"epoch": 9.536, "grad_norm": 0.11919289082288742, "learning_rate": 2.560701704306336e-06, "loss": 0.7759, "step": 149},
+     {"epoch": 9.6, "grad_norm": 0.12574751675128937, "learning_rate": 2.5000000000000015e-06, "loss": 0.8389, "step": 150},
+     {"epoch": 9.664, "grad_norm": 0.14269892871379852, "learning_rate": 2.4397856756471435e-06, "loss": 0.8767, "step": 151},
+     {"epoch": 9.728, "grad_norm": 0.12222199141979218, "learning_rate": 2.380070470149605e-06, "loss": 0.7306, "step": 152},
+     {"epoch": 9.792, "grad_norm": 0.11675389856100082, "learning_rate": 2.320866025105016e-06, "loss": 0.739, "step": 153},
+     {"epoch": 9.856, "grad_norm": 0.13254772126674652, "learning_rate": 2.2621838825372496e-06, "loss": 0.9458, "step": 154},
+     {"epoch": 9.92, "grad_norm": 0.11959590762853622, "learning_rate": 2.204035482646267e-06, "loss": 0.8222, "step": 155},
+     {"epoch": 9.984, "grad_norm": 0.15119311213493347, "learning_rate": 2.146432161577842e-06, "loss": 1.0054, "step": 156},
+     {"epoch": 10.048, "grad_norm": 0.14419986307621002, "learning_rate": 2.0893851492135536e-06, "loss": 0.9804, "step": 157},
+     {"epoch": 10.112, "grad_norm": 0.11810199171304703, "learning_rate": 2.0329055669814936e-06, "loss": 0.7982, "step": 158},
+     {"epoch": 10.176, "grad_norm": 0.1280771642923355, "learning_rate": 1.977004425688126e-06, "loss": 0.808, "step": 159},
+     {"epoch": 10.24, "grad_norm": 0.12855829298496246, "learning_rate": 1.9216926233717087e-06, "loss": 0.8768, "step": 160},
+     {"epoch": 10.304, "grad_norm": 0.12173178791999817, "learning_rate": 1.8669809431776991e-06, "loss": 0.8733, "step": 161},
+     {"epoch": 10.368, "grad_norm": 0.11684292554855347, "learning_rate": 1.8128800512565514e-06, "loss": 0.7227, "step": 162},
+     {"epoch": 10.432, "grad_norm": 0.1230064183473587, "learning_rate": 1.7594004946843458e-06, "loss": 0.8293, "step": 163},
+     {"epoch": 10.496, "grad_norm": 0.11779855191707611, "learning_rate": 1.7065526994065973e-06, "loss": 0.7961, "step": 164},
+     {"epoch": 10.56, "grad_norm": 0.12627413868904114, "learning_rate": 1.6543469682057105e-06, "loss": 0.8582, "step": 165},
+     {"epoch": 10.624, "grad_norm": 0.12739939987659454, "learning_rate": 1.6027934786924187e-06, "loss": 0.8743, "step": 166},
+     {"epoch": 10.688, "grad_norm": 0.12277089059352875, "learning_rate": 1.551902281321651e-06, "loss": 0.8122, "step": 167},
+     {"epoch": 10.752, "grad_norm": 0.12769779562950134, "learning_rate": 1.5016832974331725e-06, "loss": 0.8025, "step": 168},
+     {"epoch": 10.816, "grad_norm": 0.12428705394268036, "learning_rate": 1.4521463173173966e-06, "loss": 0.8071, "step": 169},
+     {"epoch": 10.88, "grad_norm": 0.12309806048870087, "learning_rate": 1.4033009983067454e-06, "loss": 0.8643, "step": 170},
+     {"epoch": 10.943999999999999, "grad_norm": 0.1236419677734375, "learning_rate": 1.3551568628929434e-06, "loss": 0.8408, "step": 171},
+     {"epoch": 11.008, "grad_norm": 0.17760780453681946, "learning_rate": 1.3077232968705805e-06, "loss": 1.157, "step": 172},
+     {"epoch": 11.072, "grad_norm": 0.11716406047344208, "learning_rate": 1.2610095475073415e-06, "loss": 0.7641, "step": 173},
+     {"epoch": 11.136, "grad_norm": 0.12319278717041016, "learning_rate": 1.2150247217412186e-06, "loss": 0.8901, "step": 174},
+     {"epoch": 11.2, "grad_norm": 0.1214849203824997, "learning_rate": 1.1697777844051105e-06, "loss": 0.8119, "step": 175},
+     {"epoch": 11.264, "grad_norm": 0.12246395647525787, "learning_rate": 1.1252775564791023e-06, "loss": 0.8347, "step": 176},
+     {"epoch": 11.328, "grad_norm": 0.1226000189781189, "learning_rate": 1.0815327133708015e-06, "loss": 0.8874, "step": 177},
+     {"epoch": 11.392, "grad_norm": 0.12461017072200775, "learning_rate": 1.0385517832240472e-06, "loss": 0.7858, "step": 178},
+     {"epoch": 11.456, "grad_norm": 0.1249910444021225, "learning_rate": 9.963431452563331e-07, "loss": 0.8025, "step": 179},
+     {"epoch": 11.52, "grad_norm": 0.11750364303588867, "learning_rate": 9.549150281252633e-07, "loss": 0.8357, "step": 180},
+     {"epoch": 11.584, "grad_norm": 0.12066759914159775, "learning_rate": 9.142755083243577e-07, "loss": 0.8489, "step": 181},
+     {"epoch": 11.648, "grad_norm": 0.11544518917798996, "learning_rate": 8.744325086085248e-07, "loss": 0.788, "step": 182},
+     {"epoch": 11.712, "grad_norm": 0.12407847493886948, "learning_rate": 8.353937964495029e-07, "loss": 0.8044, "step": 183},
+     {"epoch": 11.776, "grad_norm": 0.1282244175672531, "learning_rate": 7.971669825215789e-07, "loss": 0.7997, "step": 184},
+     {"epoch": 11.84, "grad_norm": 0.1248067319393158, "learning_rate": 7.597595192178702e-07, "loss": 0.835, "step": 185},
+     {"epoch": 11.904, "grad_norm": 0.120790995657444, "learning_rate": 7.23178699197467e-07, "loss": 0.8166, "step": 186},
+     {"epoch": 11.968, "grad_norm": 0.13572026789188385, "learning_rate": 6.874316539637127e-07, "loss": 0.8752, "step": 187},
+     {"epoch": 12.032, "grad_norm": 0.1664300560951233, "learning_rate": 6.52525352473905e-07, "loss": 1.0303, "step": 188},
+     {"epoch": 12.096, "grad_norm": 0.1228347048163414, "learning_rate": 6.184665997806832e-07, "loss": 0.8162, "step": 189},
+     {"epoch": 12.16, "grad_norm": 0.11765308678150177, "learning_rate": 5.852620357053651e-07, "loss": 0.7837, "step": 190},
+     {"epoch": 12.224, "grad_norm": 0.12184052914381027, "learning_rate": 5.529181335435124e-07, "loss": 0.7915, "step": 191},
+     {"epoch": 12.288, "grad_norm": 0.12436401098966599, "learning_rate": 5.214411988029355e-07, "loss": 0.8588, "step": 192},
+     {"epoch": 12.352, "grad_norm": 0.12295132875442505, "learning_rate": 4.908373679744316e-07, "loss": 0.8373, "step": 193},
+     {"epoch": 12.416, "grad_norm": 0.12551254034042358, "learning_rate": 4.6111260733545714e-07, "loss": 0.8477, "step": 194},
+     {"epoch": 12.48, "grad_norm": 0.12384471297264099, "learning_rate": 4.322727117869951e-07, "loss": 0.819, "step": 195},
+     {"epoch": 12.544, "grad_norm": 0.12367549538612366, "learning_rate": 4.043233037238281e-07, "loss": 0.7978, "step": 196},
+     {"epoch": 12.608, "grad_norm": 0.12734423577785492, "learning_rate": 3.772698319384349e-07, "loss": 0.9013, "step": 197},
+     {"epoch": 12.672, "grad_norm": 0.11762301623821259, "learning_rate": 3.511175705587433e-07, "loss": 0.7289, "step": 198},
+     {"epoch": 12.736, "grad_norm": 0.12712667882442474, "learning_rate": 3.258716180199278e-07, "loss": 0.9106, "step": 199},
+     {"epoch": 12.8, "grad_norm": 0.11422532051801682, "learning_rate": 3.015368960704584e-07, "loss": 0.7579, "step": 200},
+     {"epoch": 12.864, "grad_norm": 0.1257271021604538, "learning_rate": 2.7811814881259503e-07, "loss": 0.8527, "step": 201},
+     {"epoch": 12.928, "grad_norm": 0.1224028617143631, "learning_rate": 2.556199417775174e-07, "loss": 0.8717, "step": 202},
+     {"epoch": 12.992, "grad_norm": 0.1601700484752655, "learning_rate": 2.3404666103526542e-07, "loss": 0.9906, "step": 203},
+     {"epoch": 13.056, "grad_norm": 0.14647288620471954, "learning_rate": 2.134025123396638e-07, "loss": 0.9804, "step": 204},
+     {"epoch": 13.12, "grad_norm": 0.11194372177124023, "learning_rate": 1.9369152030840553e-07, "loss": 0.7664, "step": 205},
+     {"epoch": 13.184, "grad_norm": 0.13287296891212463, "learning_rate": 1.7491752763844294e-07, "loss": 0.8259, "step": 206},
+     {"epoch": 13.248, "grad_norm": 0.1248527243733406, "learning_rate": 1.5708419435684463e-07, "loss": 0.8892, "step": 207},
+     {"epoch": 13.312, "grad_norm": 0.1182282567024231, "learning_rate": 1.4019499710726913e-07, "loss": 0.7666, "step": 208},
+     {"epoch": 13.376, "grad_norm": 0.11040018498897552, "learning_rate": 1.2425322847218368e-07, "loss": 0.7489, "step": 209},
+     {"epoch": 13.44, "grad_norm": 0.11782634258270264, "learning_rate": 1.0926199633097156e-07, "loss": 0.8304, "step": 210},
+     {"epoch": 13.504, "grad_norm": 0.12430648505687714, "learning_rate": 9.522422325404234e-08, "loss": 0.941, "step": 211},
+     {"epoch": 13.568, "grad_norm": 0.11995483934879303, "learning_rate": 8.214264593307097e-08, "loss": 0.8013, "step": 212},
+     {"epoch": 13.632, "grad_norm": 0.12744170427322388, "learning_rate": 7.001981464747565e-08, "loss": 0.7954, "step": 213},
+     {"epoch": 13.696, "grad_norm": 0.12261604517698288, "learning_rate": 5.8858092767236084e-08, "loss": 0.8749, "step": 214},
+     {"epoch": 13.76, "grad_norm": 0.1196310818195343, "learning_rate": 4.865965629214819e-08, "loss": 0.7981, "step": 215},
+     {"epoch": 13.824, "grad_norm": 0.12484210729598999, "learning_rate": 3.9426493427611177e-08, "loss": 0.7978, "step": 216},
+     {"epoch": 13.888, "grad_norm": 0.12131179869174957, "learning_rate": 3.1160404197018155e-08, "loss": 0.7463, "step": 217},
+     {"epoch": 13.952, "grad_norm": 0.1267390251159668, "learning_rate": 2.386300009084408e-08, "loss": 0.8298, "step": 218},
+     {"epoch": 14.016, "grad_norm": 0.16648182272911072, "learning_rate": 1.753570375247815e-08, "loss": 1.154, "step": 219},
+     {"epoch": 14.08, "grad_norm": 0.11439771205186844, "learning_rate": 1.2179748700879013e-08, "loss": 0.7684, "step": 220},
+     {"epoch": 14.144, "grad_norm": 0.11785776168107986, "learning_rate": 7.796179090094891e-09, "loss": 0.7948, "step": 221},
+     {"epoch": 14.208, "grad_norm": 0.11341831833124161, "learning_rate": 4.385849505708084e-09, "loss": 0.7567, "step": 222},
+     {"epoch": 14.272, "grad_norm": 0.12832769751548767, "learning_rate": 1.9494247982282386e-09
1569
+ "loss": 1.0097,
1570
+ "step": 223
1571
+ },
1572
+ {
1573
+ "epoch": 14.336,
1574
+ "grad_norm": 0.11620064079761505,
1575
+ "learning_rate": 4.87379953478806e-10,
1576
+ "loss": 0.7495,
1577
+ "step": 224
1578
+ },
1579
+ {
1580
+ "epoch": 14.4,
1581
+ "grad_norm": 0.12200789898633957,
1582
+ "learning_rate": 0.0,
1583
+ "loss": 0.8347,
1584
+ "step": 225
1585
+ }
1586
+ ],
1587
+ "logging_steps": 1,
1588
+ "max_steps": 225,
1589
+ "num_input_tokens_seen": 0,
1590
+ "num_train_epochs": 15,
1591
+ "save_steps": 75,
1592
+ "stateful_callbacks": {
1593
+ "TrainerControl": {
1594
+ "args": {
1595
+ "should_epoch_stop": false,
1596
+ "should_evaluate": false,
1597
+ "should_log": false,
1598
+ "should_save": true,
1599
+ "should_training_stop": true
1600
+ },
1601
+ "attributes": {}
1602
+ }
1603
+ },
1604
+ "total_flos": 125627307065344.0,
1605
+ "train_batch_size": 1,
1606
+ "trial_name": null,
1607
+ "trial_params": null
1608
+ }
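For reference: these log entries follow the standard Hugging Face TrainerState schema, where they live under the top-level "log_history" key. A minimal sketch for pulling out the loss/learning-rate trajectory recorded above (path assumed from this repo's layout):

    import json

    with open("s1k-1.1/full/checkpoint-225/trainer_state.json") as f:
        state = json.load(f)

    # each entry carries epoch, grad_norm, learning_rate, loss and step, as above
    for entry in state["log_history"]:
        print(entry["step"], entry["loss"], entry["learning_rate"])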
s1k-1.1/full/checkpoint-225/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9a9f455dc7902de14b90e50b602aa3f85b31052d9b16e340282d8fc3c06c90fb
3
+ size 7608
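For quick inspection, training_args.bin is a pickled TrainingArguments object, so it can be deserialized with torch.load. A hedged sketch (requires transformers to be installed so the pickle can resolve the class):

    import torch

    args = torch.load("s1k-1.1/full/checkpoint-225/training_args.bin", weights_only=False)
    print(args.num_train_epochs, args.learning_rate)  # should match the schedule logged above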
s1k-1.1/full/checkpoint-225/zero_to_fp32.py ADDED
@@ -0,0 +1,674 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example:
14
+ # python zero_to_fp32.py . output_dir/
15
+ # or
16
+ # python zero_to_fp32.py . output_dir/ --safe_serialization
17
+
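+ # For this repository's layout the call would be, e.g. (illustrative; the
+ # checkpoint folder holds the 'latest' file pointing at global_step225):
+ #   cd s1k-1.1/full/checkpoint-225
+ #   python zero_to_fp32.py . fp32_output/ --safe_serialization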
18
+ import argparse
19
+ import torch
20
+ import glob
21
+ import math
22
+ import os
23
+ import re
24
+ import json
25
+ from tqdm import tqdm
26
+ from collections import OrderedDict
27
+ from dataclasses import dataclass
28
+
29
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
30
+ # DeepSpeed data structures it has to be available in the current python environment.
31
+ from deepspeed.utils import logger
32
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
33
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
34
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
35
+
36
+
37
+ @dataclass
38
+ class zero_model_state:
39
+ buffers: dict
40
+ param_shapes: dict
41
+ shared_params: list
42
+ ds_version: int
43
+ frozen_param_shapes: dict
44
+ frozen_param_fragments: dict
45
+
46
+
47
+ debug = 0
48
+
49
+ # load to cpu
50
+ device = torch.device('cpu')
51
+
52
+
53
+ def atoi(text):
54
+ return int(text) if text.isdigit() else text
55
+
56
+
57
+ def natural_keys(text):
58
+ '''
59
+ alist.sort(key=natural_keys) sorts in human order
60
+ http://nedbatchelder.com/blog/200712/human_sorting.html
61
+ (See Toothy's implementation in the comments)
62
+ '''
63
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
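+ # illustrative example (added for clarity): numeric chunks compare as ints, so
+ # sorted(["rank_10", "rank_2"], key=natural_keys) == ["rank_2", "rank_10"]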
64
+
65
+
66
+ def get_model_state_file(checkpoint_dir, zero_stage):
67
+ if not os.path.isdir(checkpoint_dir):
68
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
69
+
70
+ # there should be only one file
71
+ if zero_stage <= 2:
72
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
73
+ elif zero_stage == 3:
74
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
75
+
76
+ if not os.path.exists(file):
77
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
78
+
79
+ return file
80
+
81
+
82
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
83
+ # XXX: need to test that this simple glob rule works for multi-node setup too
84
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
85
+
86
+ if len(ckpt_files) == 0:
87
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
88
+
89
+ return ckpt_files
90
+
91
+
92
+ def get_optim_files(checkpoint_dir):
93
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
94
+
95
+
96
+ def get_model_state_files(checkpoint_dir):
97
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
98
+
99
+
100
+ def parse_model_states(files):
101
+ zero_model_states = []
102
+ for file in files:
103
+ state_dict = torch.load(file, map_location=device)
104
+
105
+ if BUFFER_NAMES not in state_dict:
106
+ raise ValueError(f"{file} is not a model state checkpoint")
107
+ buffer_names = state_dict[BUFFER_NAMES]
108
+ if debug:
109
+ print("Found buffers:", buffer_names)
110
+
111
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
112
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
113
+ param_shapes = state_dict[PARAM_SHAPES]
114
+
115
+ # collect parameters that are included in param_shapes
116
+ param_names = []
117
+ for s in param_shapes:
118
+ for name in s.keys():
119
+ param_names.append(name)
120
+
121
+ # update with frozen parameters
122
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
123
+ if frozen_param_shapes is not None:
124
+ if debug:
125
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
126
+ param_names += list(frozen_param_shapes.keys())
127
+
128
+ # handle shared params
129
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
130
+
131
+ ds_version = state_dict.get(DS_VERSION, None)
132
+
133
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
134
+
135
+ z_model_state = zero_model_state(buffers=buffers,
136
+ param_shapes=param_shapes,
137
+ shared_params=shared_params,
138
+ ds_version=ds_version,
139
+ frozen_param_shapes=frozen_param_shapes,
140
+ frozen_param_fragments=frozen_param_fragments)
141
+ zero_model_states.append(z_model_state)
142
+
143
+ return zero_model_states
144
+
145
+
146
+ def parse_optim_states(files, ds_checkpoint_dir):
147
+ total_files = len(files)
148
+ state_dicts = []
149
+ for f in files:
150
+ state_dict = torch.load(f, map_location=device)
151
+ # immediately discard the two potentially huge optimizer states, since we only care about the fp32 master weights,
152
+ # and also handle the case where they were already removed by another helper script
153
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
154
+ state_dicts.append(state_dict)
155
+
156
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
157
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
158
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
159
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
160
+
161
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
162
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
163
+ # use the max of the partition_count to get the dp world_size.
164
+
165
+ if type(world_size) is list:
166
+ world_size = max(world_size)
167
+
168
+ if world_size != total_files:
169
+ raise ValueError(
170
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
171
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
172
+ )
173
+
174
+ # the groups are named differently in each stage
175
+ if zero_stage <= 2:
176
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
177
+ elif zero_stage == 3:
178
+ fp32_groups_key = FP32_FLAT_GROUPS
179
+ else:
180
+ raise ValueError(f"unknown zero stage {zero_stage}")
181
+
182
+ if zero_stage <= 2:
183
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
184
+ elif zero_stage == 3:
185
+ # if there is more than one param group, there will be multiple flattened tensors - one
186
+ # flattened tensor per group - for simplicity merge them into a single tensor
187
+ #
188
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
189
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
190
+
191
+ fp32_flat_groups = [
192
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
193
+ ]
194
+
195
+ return zero_stage, world_size, fp32_flat_groups
196
+
197
+
198
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
199
+ """
200
+ Returns fp32 state_dict reconstructed from ds checkpoint
201
+
202
+ Args:
203
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
204
+
205
+ """
206
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
207
+
208
+ optim_files = get_optim_files(ds_checkpoint_dir)
209
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
210
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
211
+
212
+ model_files = get_model_state_files(ds_checkpoint_dir)
213
+
214
+ zero_model_states = parse_model_states(model_files)
215
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
216
+
217
+ if zero_stage <= 2:
218
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
219
+ exclude_frozen_parameters)
220
+ elif zero_stage == 3:
221
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
222
+ exclude_frozen_parameters)
223
+
224
+
225
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
226
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
227
+ return
228
+
229
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
230
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
231
+
232
+ if debug:
233
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
234
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
235
+
236
+ wanted_params = len(frozen_param_shapes)
237
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
238
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
239
+ print(f'Frozen params: Have {avail_numel} numels to process.')
240
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
241
+
242
+ total_params = 0
243
+ total_numel = 0
244
+ for name, shape in frozen_param_shapes.items():
245
+ total_params += 1
246
+ unpartitioned_numel = shape.numel()
247
+ total_numel += unpartitioned_numel
248
+
249
+ state_dict[name] = frozen_param_fragments[name]
250
+
251
+ if debug:
252
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
253
+
254
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
255
+
256
+
257
+ def _has_callable(obj, fn):
258
+ attr = getattr(obj, fn, None)
259
+ return callable(attr)
260
+
261
+
262
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
263
+ param_shapes = zero_model_states[0].param_shapes
264
+
265
+ # Reconstruction protocol:
266
+ #
267
+ # XXX: document this
268
+
269
+ if debug:
270
+ for i in range(world_size):
271
+ for j in range(len(fp32_flat_groups[0])):
272
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
273
+
274
+ # XXX: memory usage doubles here (zero2)
275
+ num_param_groups = len(fp32_flat_groups[0])
276
+ merged_single_partition_of_fp32_groups = []
277
+ for i in range(num_param_groups):
278
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
279
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
280
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
281
+ avail_numel = sum(
282
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
283
+
284
+ if debug:
285
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
286
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
287
+ # not asserting if there is a mismatch due to possible padding
288
+ print(f"Have {avail_numel} numels to process.")
289
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
290
+
291
+ # params
292
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
293
+ # out-of-core computing solution
294
+ total_numel = 0
295
+ total_params = 0
296
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
297
+ offset = 0
298
+ avail_numel = full_single_fp32_vector.numel()
299
+ for name, shape in shapes.items():
300
+
301
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
302
+ total_numel += unpartitioned_numel
303
+ total_params += 1
304
+
305
+ if debug:
306
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
307
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
308
+ offset += unpartitioned_numel
309
+
310
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
311
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
312
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
313
+ # live optimizer object, so we are checking that the numbers are within the right range
314
+ align_to = 2 * world_size
315
+
316
+ def zero2_align(x):
317
+ return align_to * math.ceil(x / align_to)
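+ # illustrative example (added for clarity): with world_size=8, align_to=16,
+ # so zero2_align(10) == 16 and zero2_align(16) == 16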
318
+
319
+ if debug:
320
+ print(f"original offset={offset}, avail_numel={avail_numel}")
321
+
322
+ offset = zero2_align(offset)
323
+ avail_numel = zero2_align(avail_numel)
324
+
325
+ if debug:
326
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
327
+
328
+ # Sanity check
329
+ if offset != avail_numel:
330
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
331
+
332
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
333
+
334
+
335
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
336
+ exclude_frozen_parameters):
337
+ state_dict = OrderedDict()
338
+
339
+ # buffers
340
+ buffers = zero_model_states[0].buffers
341
+ state_dict.update(buffers)
342
+ if debug:
343
+ print(f"added {len(buffers)} buffers")
344
+
345
+ if not exclude_frozen_parameters:
346
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
347
+
348
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
349
+
350
+ # recover shared parameters
351
+ for pair in zero_model_states[0].shared_params:
352
+ if pair[1] in state_dict:
353
+ state_dict[pair[0]] = state_dict[pair[1]]
354
+
355
+ return state_dict
356
+
357
+
358
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
359
+ remainder = unpartitioned_numel % world_size
360
+ padding_numel = (world_size - remainder) if remainder else 0
361
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
362
+ return partitioned_numel, padding_numel
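+ # illustrative example (added for clarity): unpartitioned_numel=10, world_size=8
+ # -> remainder=2, padding_numel=6, partitioned_numel=ceil(10/8)=2,
+ #    and 8 ranks * 2 elements = 16 = 10 real + 6 padding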
363
+
364
+
365
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
366
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
367
+ return
368
+
369
+ if debug:
370
+ for i in range(world_size):
371
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
372
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
373
+
374
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
375
+ wanted_params = len(frozen_param_shapes)
376
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
377
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
378
+ print(f'Frozen params: Have {avail_numel} numels to process.')
379
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
380
+
381
+ total_params = 0
382
+ total_numel = 0
383
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
384
+ total_params += 1
385
+ unpartitioned_numel = shape.numel()
386
+ total_numel += unpartitioned_numel
387
+
388
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
389
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
390
+
391
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
392
+
393
+ if debug:
394
+ print(
395
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
396
+ )
397
+
398
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
399
+
400
+
401
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
402
+ param_shapes = zero_model_states[0].param_shapes
403
+ avail_numel = fp32_flat_groups[0].numel() * world_size
404
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
405
+ # param, re-consolidating each param, while dealing with padding if any
406
+
407
+ # merge list of dicts, preserving order
408
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
409
+
410
+ if debug:
411
+ for i in range(world_size):
412
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
413
+
414
+ wanted_params = len(param_shapes)
415
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
416
+ # not asserting if there is a mismatch due to possible padding
417
+ avail_numel = fp32_flat_groups[0].numel() * world_size
418
+ print(f"Trainable params: Have {avail_numel} numels to process.")
419
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
420
+
421
+ # params
422
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
423
+ # out-of-core computing solution
424
+ offset = 0
425
+ total_numel = 0
426
+ total_params = 0
427
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering Sharded Weights'):
428
+ unpartitioned_numel = shape.numel()
429
+ total_numel += unpartitioned_numel
430
+ total_params += 1
431
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
432
+
433
+ if debug:
434
+ print(
435
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
436
+ )
437
+
438
+ # XXX: memory usage doubles here
439
+ state_dict[name] = torch.cat(
440
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
441
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
442
+ offset += partitioned_numel
443
+
444
+ offset *= world_size
445
+
446
+ # Sanity check
447
+ if offset != avail_numel:
448
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
449
+
450
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
451
+
452
+
453
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
454
+ exclude_frozen_parameters):
455
+ state_dict = OrderedDict()
456
+
457
+ # buffers
458
+ buffers = zero_model_states[0].buffers
459
+ state_dict.update(buffers)
460
+ if debug:
461
+ print(f"added {len(buffers)} buffers")
462
+
463
+ if not exclude_frozen_parameters:
464
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
465
+
466
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
467
+
468
+ # recover shared parameters
469
+ for pair in zero_model_states[0].shared_params:
470
+ if pair[1] in state_dict:
471
+ state_dict[pair[0]] = state_dict[pair[1]]
472
+
473
+ return state_dict
474
+
475
+
476
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
477
+ """
478
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
479
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
480
+ via a model hub.
481
+
482
+ Args:
483
+ - ``checkpoint_dir``: path to the desired checkpoint folder
484
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
485
+ - ``exclude_frozen_parameters``: exclude frozen parameters
486
+
487
+ Returns:
488
+ - pytorch ``state_dict``
489
+
490
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory and
491
+ you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
492
+ the checkpoint.
493
+
494
+ A typical usage might be ::
495
+
496
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
497
+ # do the training and checkpoint saving
498
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
499
+ model = model.cpu() # move to cpu
500
+ model.load_state_dict(state_dict)
501
+ # submit to model hub or save the model to share with others
502
+
503
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
504
+ application. i.e. you will need to re-initialize the deepspeed engine, since
505
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
506
+
507
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
508
+
509
+ """
510
+ if tag is None:
511
+ latest_path = os.path.join(checkpoint_dir, 'latest')
512
+ if os.path.isfile(latest_path):
513
+ with open(latest_path, 'r') as fd:
514
+ tag = fd.read().strip()
515
+ else:
516
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
517
+
518
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
519
+
520
+ if not os.path.isdir(ds_checkpoint_dir):
521
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
522
+
523
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
524
+
525
+
526
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
527
+ output_dir,
528
+ max_shard_size="5GB",
529
+ safe_serialization=False,
530
+ tag=None,
531
+ exclude_frozen_parameters=False):
532
+ """
533
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
534
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
535
+
536
+ Args:
537
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
538
+ - ``output_dir``: directory to the pytorch fp32 state_dict output files
539
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
540
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
541
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
542
+ - ``exclude_frozen_parameters``: exclude frozen parameters
543
+ """
544
+ # Dependency pre-check
545
+ if safe_serialization:
546
+ try:
547
+ from safetensors.torch import save_file
548
+ except ImportError:
549
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
550
+ raise
551
+ if max_shard_size is not None:
552
+ try:
553
+ from huggingface_hub import split_torch_state_dict_into_shards
554
+ except ImportError:
555
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
556
+ raise
557
+
558
+ # Convert zero checkpoint to state_dict
559
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
560
+
561
+ # Shard the model if it is too big.
562
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
563
+ if max_shard_size is not None:
564
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
565
+ state_dict_split = split_torch_state_dict_into_shards(state_dict,
566
+ filename_pattern=filename_pattern,
567
+ max_shard_size=max_shard_size)
568
+ else:
569
+ from collections import namedtuple
570
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
571
+ state_dict_split = StateDictSplit(is_sharded=False,
572
+ filename_to_tensors={weights_name: list(state_dict.keys())})
573
+
574
+ # Save the model
575
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
576
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
577
+ shard = {tensor: state_dict[tensor].contiguous() for tensor in tensors}
578
+ output_path = os.path.join(output_dir, shard_file)
579
+ if safe_serialization:
580
+ save_file(shard, output_path, metadata={"format": "pt"})
581
+ else:
582
+ torch.save(shard, output_path)
583
+
584
+ # Save index if sharded
585
+ if state_dict_split.is_sharded:
586
+ index = {
587
+ "metadata": state_dict_split.metadata,
588
+ "weight_map": state_dict_split.tensor_to_filename,
589
+ }
590
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
591
+ save_index_file = os.path.join(output_dir, save_index_file)
592
+ with open(save_index_file, "w", encoding="utf-8") as f:
593
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
594
+ f.write(content)
595
+
596
+
597
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
598
+ """
599
+ 1. Put the provided model to cpu
600
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
601
+ 3. Load it into the provided model
602
+
603
+ Args:
604
+ - ``model``: the model object to update
605
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
606
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
607
+
608
+ Returns:
609
+ - ``model``: modified model
610
+
611
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
612
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
613
+ conveniently placed for you in the checkpoint folder.
614
+
615
+ A typical usage might be ::
616
+
617
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
618
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
619
+ # submit to model hub or save the model to share with others
620
+
621
+ Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
622
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
623
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
624
+
625
+ """
626
+ logger.info(f"Extracting fp32 weights")
627
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
628
+
629
+ logger.info(f"Overwriting model with fp32 weights")
630
+ model = model.cpu()
631
+ model.load_state_dict(state_dict, strict=False)
632
+
633
+ return model
634
+
635
+
636
+ if __name__ == "__main__":
637
+ parser = argparse.ArgumentParser()
638
+ parser.add_argument("checkpoint_dir",
639
+ type=str,
640
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
641
+ parser.add_argument("output_dir",
642
+ type=str,
643
+ help="directory to the pytorch fp32 state_dict output files"
644
+ "(e.g. path/checkpoint-12-output/)")
645
+ parser.add_argument(
646
+ "--max_shard_size",
647
+ type=str,
648
+ default="5GB",
649
+ help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
650
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
651
+ "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
652
+ "without CPU OOM issues.")
653
+ parser.add_argument(
654
+ "--safe_serialization",
655
+ default=False,
656
+ action='store_true',
657
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
658
+ parser.add_argument("-t",
659
+ "--tag",
660
+ type=str,
661
+ default=None,
662
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
663
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
664
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
665
+ args = parser.parse_args()
666
+
667
+ debug = args.debug
668
+
669
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
670
+ args.output_dir,
671
+ max_shard_size=args.max_shard_size,
672
+ safe_serialization=args.safe_serialization,
673
+ tag=args.tag,
674
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
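Taken together, a minimal end-to-end usage sketch for this upload (hypothetical output path; assumes deepspeed, plus safetensors for safe serialization, are installed):

    import os
    from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

    out_dir = "s1k-1.1/full/checkpoint-225/fp32"   # hypothetical output location
    os.makedirs(out_dir, exist_ok=True)            # the script writes shards into an existing directory
    convert_zero_checkpoint_to_fp32_state_dict(
        "s1k-1.1/full/checkpoint-225",             # contains 'latest' -> global_step225
        out_dir,
        safe_serialization=True,
    )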