diff --git a/checkpoint-150/config.json b/checkpoint-150/config.json
deleted file mode 100644
index 82bacd02f94ab4a4cfdb7b12a0484fac8f301916..0000000000000000000000000000000000000000
--- a/checkpoint-150/config.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
-  "_name_or_path": "mistralai/Mistral-7B-v0.1",
-  "architectures": [
-    "MistralForCausalLM"
-  ],
-  "bos_token_id": 1,
-  "eos_token_id": 2,
-  "hidden_act": "silu",
-  "hidden_size": 4096,
-  "initializer_range": 0.02,
-  "intermediate_size": 14336,
-  "max_position_embeddings": 32768,
-  "model_type": "mistral",
-  "num_attention_heads": 32,
-  "num_hidden_layers": 32,
-  "num_key_value_heads": 8,
-  "rms_norm_eps": 1e-05,
-  "rope_theta": 10000.0,
-  "sliding_window": 4096,
-  "tie_word_embeddings": false,
-  "torch_dtype": "bfloat16",
-  "transformers_version": "4.34.0.dev0",
-  "use_cache": false,
-  "vocab_size": 32002
-}
diff --git a/checkpoint-150/generation_config.json b/checkpoint-150/generation_config.json
deleted file mode 100644
index 2c5f418036a121b3fd432d1bf2b3c5c9daf59fab..0000000000000000000000000000000000000000
--- a/checkpoint-150/generation_config.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "_from_model_config": true,
-  "bos_token_id": 1,
-  "eos_token_id": 2,
-  "transformers_version": "4.34.0.dev0"
-}
diff --git a/checkpoint-150/latest b/checkpoint-150/latest
deleted file mode 100644
index daf5be2c4861b36c6659b05fae8c31547db7f579..0000000000000000000000000000000000000000
--- a/checkpoint-150/latest
+++ /dev/null
@@ -1 +0,0 @@
-global_step150
\ No newline at end of file
diff --git a/checkpoint-150/pytorch_model-00001-of-00002.bin b/checkpoint-150/pytorch_model-00001-of-00002.bin
deleted file mode 100644
index ad9d9c0216e08e06631df3ed167f709cb5f91f70..0000000000000000000000000000000000000000
--- a/checkpoint-150/pytorch_model-00001-of-00002.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5dfffab3ff404e1f12ab78eeaa64de020182e23ef1fdf655b67f138f53b57776
-size 9943044428
diff --git a/checkpoint-150/pytorch_model-00002-of-00002.bin b/checkpoint-150/pytorch_model-00002-of-00002.bin
deleted file mode 100644
index dde4607f61bde844207c2e106c839555d2c48d06..0000000000000000000000000000000000000000
--- a/checkpoint-150/pytorch_model-00002-of-00002.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b5d5ce3c93e1b594fb0c685762baf81fb339d78111f6fe4459084b45d5dbc36d
-size 4540552031
diff --git a/checkpoint-150/pytorch_model.bin.index.json b/checkpoint-150/pytorch_model.bin.index.json
deleted file mode 100644
index 53213fb82ddc02718be2ce686f00ba7fb0af95e7..0000000000000000000000000000000000000000
--- a/checkpoint-150/pytorch_model.bin.index.json
+++ /dev/null
@@ -1,298 +0,0 @@
-{
-  "metadata": {
-    "total_size": 14483496960
-  },
-  "weight_map": {
-    "lm_head.weight": "pytorch_model-00002-of-00002.bin",
-    "model.embed_tokens.weight": "pytorch_model-00001-of-00002.bin",
-    "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
-    "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
-    "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
-    "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
-    "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
-    "model.layers.0.self_attn.q_proj.weight":
"pytorch_model-00001-of-00002.bin", - "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.self_attn.o_proj.weight": 
"pytorch_model-00001-of-00002.bin", - "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.self_attn.k_proj.weight": 
"pytorch_model-00001-of-00002.bin", - "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.22.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.post_attention_layernorm.weight": 
"pytorch_model-00002-of-00002.bin", - "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.23.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.mlp.up_proj.weight": 
"pytorch_model-00002-of-00002.bin", - "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.30.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.mlp.gate_proj.weight": 
"pytorch_model-00002-of-00002.bin", - "model.layers.31.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - 
"model.layers.8.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.norm.weight": "pytorch_model-00002-of-00002.bin" - } -} diff --git a/checkpoint-150/rng_state_0.pth b/checkpoint-150/rng_state_0.pth deleted file mode 100644 index 24aba7bcc2a9fb783bd13a6a67b96e5ad055d89d..0000000000000000000000000000000000000000 --- a/checkpoint-150/rng_state_0.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1eafe3d5e0585dde8c5033613de99a5d4f23df4284a488f4007b3944580c0b97 -size 17655 diff --git a/checkpoint-150/rng_state_1.pth b/checkpoint-150/rng_state_1.pth deleted file mode 100644 index 6b2ef88173fde17f2b3e738a28446f89a0528a96..0000000000000000000000000000000000000000 --- a/checkpoint-150/rng_state_1.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7e34eb456d2d003a2839f2daa9425e99bdd79ed7e24a1de9fc7d5738476bfb4b -size 17655 diff --git a/checkpoint-150/rng_state_2.pth b/checkpoint-150/rng_state_2.pth deleted file mode 100644 index 7b118d52a3006aea6c44f23f94c5568d1fb0a2f3..0000000000000000000000000000000000000000 --- a/checkpoint-150/rng_state_2.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b374af4a2765d8771cee7a72921d3c2e438b9bee34f0b2d098ce6071afeb65e4 -size 17655 diff --git a/checkpoint-150/rng_state_3.pth b/checkpoint-150/rng_state_3.pth deleted file mode 100644 index 3f6fd9aa58eb1d5815ca991134531a3280601900..0000000000000000000000000000000000000000 --- a/checkpoint-150/rng_state_3.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5df75d8477fcc69c7abb03025313915ebfe3ac18c54a7c57aaa455c0099e13e5 -size 17655 diff --git a/checkpoint-150/trainer_state.json b/checkpoint-150/trainer_state.json deleted file mode 100644 index be94d2c7da8a5a56e771ef2733e94c9c2fc2730a..0000000000000000000000000000000000000000 --- a/checkpoint-150/trainer_state.json +++ /dev/null @@ -1,943 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 0.019846520243450648, - "eval_steps": 756, - "global_step": 150, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.0, - "learning_rate": 0.0, - "loss": 0.9197, - "step": 1 - }, - { - "epoch": 0.0, - "eval_loss": 
1.4652303457260132, - "eval_runtime": 2.1726, - "eval_samples_per_second": 79.627, - "eval_steps_per_second": 3.682, - "step": 1 - }, - { - "epoch": 0.0, - "eval_bench_accuracy_agieval": 0.2711864406779661, - "eval_bench_accuracy_arc_challenge": 0.8703703703703703, - "eval_bench_accuracy_arc_easy": 0.9259259259259259, - "eval_bench_accuracy_bigbench": 0.36065573770491804, - "eval_bench_accuracy_boolq": 0.5740740740740741, - "eval_bench_accuracy_mmlu": 0.5185185185185185, - "eval_bench_accuracy_openbookqa": 0.1111111111111111, - "eval_bench_accuracy_truthful_qa": 0.3584905660377358, - "eval_bench_accuracy_winogrande": 0.4444444444444444, - "eval_bench_average_accuracy": 0.4927530209850072, - "eval_bench_loss": 2.6978388407144203, - "eval_bench_total_accuracy": 0.48893360160965793, - "step": 1 - }, - { - "epoch": 0.0, - "learning_rate": 6.000000000000001e-07, - "loss": 1.3426, - "step": 2 - }, - { - "epoch": 0.0, - "learning_rate": 1.2000000000000002e-06, - "loss": 1.5882, - "step": 3 - }, - { - "epoch": 0.0, - "learning_rate": 1.8e-06, - "loss": 0.8542, - "step": 4 - }, - { - "epoch": 0.0, - "learning_rate": 2.4000000000000003e-06, - "loss": 0.9629, - "step": 5 - }, - { - "epoch": 0.0, - "learning_rate": 3e-06, - "loss": 0.903, - "step": 6 - }, - { - "epoch": 0.0, - "learning_rate": 3.6e-06, - "loss": 0.909, - "step": 7 - }, - { - "epoch": 0.0, - "learning_rate": 4.2e-06, - "loss": 0.8666, - "step": 8 - }, - { - "epoch": 0.0, - "learning_rate": 4.800000000000001e-06, - "loss": 1.0108, - "step": 9 - }, - { - "epoch": 0.0, - "learning_rate": 5.4e-06, - "loss": 0.8958, - "step": 10 - }, - { - "epoch": 0.0, - "learning_rate": 6e-06, - "loss": 0.9348, - "step": 11 - }, - { - "epoch": 0.0, - "learning_rate": 5.999602806831722e-06, - "loss": 0.7832, - "step": 12 - }, - { - "epoch": 0.0, - "learning_rate": 5.999205613663445e-06, - "loss": 0.8083, - "step": 13 - }, - { - "epoch": 0.0, - "learning_rate": 5.9988084204951675e-06, - "loss": 0.8164, - "step": 14 - }, - { - "epoch": 0.0, - "learning_rate": 5.99841122732689e-06, - "loss": 0.7834, - "step": 15 - }, - { - "epoch": 0.0, - "learning_rate": 5.998014034158613e-06, - "loss": 0.8718, - "step": 16 - }, - { - "epoch": 0.0, - "learning_rate": 5.997616840990336e-06, - "loss": 0.84, - "step": 17 - }, - { - "epoch": 0.0, - "learning_rate": 5.997219647822058e-06, - "loss": 0.7397, - "step": 18 - }, - { - "epoch": 0.0, - "learning_rate": 5.99682245465378e-06, - "loss": 0.7445, - "step": 19 - }, - { - "epoch": 0.0, - "learning_rate": 5.996425261485502e-06, - "loss": 0.7898, - "step": 20 - }, - { - "epoch": 0.0, - "learning_rate": 5.996028068317225e-06, - "loss": 0.7388, - "step": 21 - }, - { - "epoch": 0.0, - "learning_rate": 5.9956308751489475e-06, - "loss": 0.7296, - "step": 22 - }, - { - "epoch": 0.0, - "learning_rate": 5.99523368198067e-06, - "loss": 0.7993, - "step": 23 - }, - { - "epoch": 0.0, - "learning_rate": 5.994836488812393e-06, - "loss": 0.7188, - "step": 24 - }, - { - "epoch": 0.0, - "learning_rate": 5.994439295644115e-06, - "loss": 0.7473, - "step": 25 - }, - { - "epoch": 0.0, - "learning_rate": 5.994042102475838e-06, - "loss": 0.6997, - "step": 26 - }, - { - "epoch": 0.0, - "learning_rate": 5.99364490930756e-06, - "loss": 0.725, - "step": 27 - }, - { - "epoch": 0.0, - "learning_rate": 5.993247716139283e-06, - "loss": 0.7272, - "step": 28 - }, - { - "epoch": 0.0, - "learning_rate": 5.992850522971005e-06, - "loss": 0.7427, - "step": 29 - }, - { - "epoch": 0.0, - "learning_rate": 5.992453329802727e-06, - "loss": 0.7309, - "step": 30 - }, - { - 
"epoch": 0.0, - "learning_rate": 5.99205613663445e-06, - "loss": 0.6764, - "step": 31 - }, - { - "epoch": 0.0, - "learning_rate": 5.991658943466173e-06, - "loss": 0.7556, - "step": 32 - }, - { - "epoch": 0.0, - "learning_rate": 5.991261750297895e-06, - "loss": 0.7301, - "step": 33 - }, - { - "epoch": 0.0, - "learning_rate": 5.990864557129617e-06, - "loss": 0.6776, - "step": 34 - }, - { - "epoch": 0.0, - "learning_rate": 5.99046736396134e-06, - "loss": 0.6884, - "step": 35 - }, - { - "epoch": 0.0, - "learning_rate": 5.990070170793063e-06, - "loss": 0.7179, - "step": 36 - }, - { - "epoch": 0.0, - "learning_rate": 5.989672977624785e-06, - "loss": 0.6915, - "step": 37 - }, - { - "epoch": 0.01, - "learning_rate": 5.989275784456507e-06, - "loss": 0.7308, - "step": 38 - }, - { - "epoch": 0.01, - "learning_rate": 5.98887859128823e-06, - "loss": 0.6743, - "step": 39 - }, - { - "epoch": 0.01, - "learning_rate": 5.9884813981199526e-06, - "loss": 0.6604, - "step": 40 - }, - { - "epoch": 0.01, - "learning_rate": 5.988084204951675e-06, - "loss": 0.6609, - "step": 41 - }, - { - "epoch": 0.01, - "learning_rate": 5.987687011783397e-06, - "loss": 0.6524, - "step": 42 - }, - { - "epoch": 0.01, - "learning_rate": 5.98728981861512e-06, - "loss": 0.6386, - "step": 43 - }, - { - "epoch": 0.01, - "learning_rate": 5.986892625446843e-06, - "loss": 0.728, - "step": 44 - }, - { - "epoch": 0.01, - "learning_rate": 5.986495432278565e-06, - "loss": 0.6971, - "step": 45 - }, - { - "epoch": 0.01, - "learning_rate": 5.986098239110287e-06, - "loss": 0.6772, - "step": 46 - }, - { - "epoch": 0.01, - "learning_rate": 5.98570104594201e-06, - "loss": 0.6774, - "step": 47 - }, - { - "epoch": 0.01, - "learning_rate": 5.9853038527737325e-06, - "loss": 0.6868, - "step": 48 - }, - { - "epoch": 0.01, - "learning_rate": 5.984906659605455e-06, - "loss": 0.7169, - "step": 49 - }, - { - "epoch": 0.01, - "learning_rate": 5.984509466437178e-06, - "loss": 0.669, - "step": 50 - }, - { - "epoch": 0.01, - "learning_rate": 5.9841122732689e-06, - "loss": 0.7112, - "step": 51 - }, - { - "epoch": 0.01, - "learning_rate": 5.983715080100622e-06, - "loss": 0.6667, - "step": 52 - }, - { - "epoch": 0.01, - "learning_rate": 5.983317886932344e-06, - "loss": 0.6528, - "step": 53 - }, - { - "epoch": 0.01, - "learning_rate": 5.982920693764068e-06, - "loss": 0.6699, - "step": 54 - }, - { - "epoch": 0.01, - "learning_rate": 5.98252350059579e-06, - "loss": 0.6584, - "step": 55 - }, - { - "epoch": 0.01, - "learning_rate": 5.9821263074275125e-06, - "loss": 0.6328, - "step": 56 - }, - { - "epoch": 0.01, - "learning_rate": 5.981729114259235e-06, - "loss": 0.6472, - "step": 57 - }, - { - "epoch": 0.01, - "learning_rate": 5.981331921090958e-06, - "loss": 0.6992, - "step": 58 - }, - { - "epoch": 0.01, - "learning_rate": 5.98093472792268e-06, - "loss": 0.6666, - "step": 59 - }, - { - "epoch": 0.01, - "learning_rate": 5.980537534754402e-06, - "loss": 0.6819, - "step": 60 - }, - { - "epoch": 0.01, - "learning_rate": 5.980140341586125e-06, - "loss": 0.705, - "step": 61 - }, - { - "epoch": 0.01, - "learning_rate": 5.979743148417847e-06, - "loss": 0.6871, - "step": 62 - }, - { - "epoch": 0.01, - "learning_rate": 5.97934595524957e-06, - "loss": 0.6998, - "step": 63 - }, - { - "epoch": 0.01, - "learning_rate": 5.978948762081292e-06, - "loss": 0.6081, - "step": 64 - }, - { - "epoch": 0.01, - "learning_rate": 5.9785515689130154e-06, - "loss": 0.6985, - "step": 65 - }, - { - "epoch": 0.01, - "learning_rate": 5.978154375744738e-06, - "loss": 0.6631, - "step": 66 - }, - { - 
"epoch": 0.01, - "learning_rate": 5.97775718257646e-06, - "loss": 0.6534, - "step": 67 - }, - { - "epoch": 0.01, - "learning_rate": 5.977359989408182e-06, - "loss": 0.6685, - "step": 68 - }, - { - "epoch": 0.01, - "learning_rate": 5.976962796239905e-06, - "loss": 0.6821, - "step": 69 - }, - { - "epoch": 0.01, - "learning_rate": 5.976565603071627e-06, - "loss": 0.6241, - "step": 70 - }, - { - "epoch": 0.01, - "learning_rate": 5.976168409903349e-06, - "loss": 0.6357, - "step": 71 - }, - { - "epoch": 0.01, - "learning_rate": 5.975771216735072e-06, - "loss": 0.6466, - "step": 72 - }, - { - "epoch": 0.01, - "learning_rate": 5.975374023566795e-06, - "loss": 0.6579, - "step": 73 - }, - { - "epoch": 0.01, - "learning_rate": 5.9749768303985176e-06, - "loss": 0.6298, - "step": 74 - }, - { - "epoch": 0.01, - "learning_rate": 5.97457963723024e-06, - "loss": 0.703, - "step": 75 - }, - { - "epoch": 0.01, - "learning_rate": 5.974182444061963e-06, - "loss": 0.6152, - "step": 76 - }, - { - "epoch": 0.01, - "learning_rate": 5.973785250893685e-06, - "loss": 0.6682, - "step": 77 - }, - { - "epoch": 0.01, - "learning_rate": 5.973388057725407e-06, - "loss": 0.6427, - "step": 78 - }, - { - "epoch": 0.01, - "learning_rate": 5.972990864557129e-06, - "loss": 0.6969, - "step": 79 - }, - { - "epoch": 0.01, - "learning_rate": 5.972593671388852e-06, - "loss": 0.6619, - "step": 80 - }, - { - "epoch": 0.01, - "learning_rate": 5.9721964782205745e-06, - "loss": 0.6332, - "step": 81 - }, - { - "epoch": 0.01, - "learning_rate": 5.9717992850522975e-06, - "loss": 0.6203, - "step": 82 - }, - { - "epoch": 0.01, - "learning_rate": 5.97140209188402e-06, - "loss": 0.6463, - "step": 83 - }, - { - "epoch": 0.01, - "learning_rate": 5.971004898715743e-06, - "loss": 0.6718, - "step": 84 - }, - { - "epoch": 0.01, - "learning_rate": 5.970607705547465e-06, - "loss": 0.6495, - "step": 85 - }, - { - "epoch": 0.01, - "learning_rate": 5.970210512379187e-06, - "loss": 0.5787, - "step": 86 - }, - { - "epoch": 0.01, - "learning_rate": 5.96981331921091e-06, - "loss": 0.6897, - "step": 87 - }, - { - "epoch": 0.01, - "learning_rate": 5.969416126042632e-06, - "loss": 0.6688, - "step": 88 - }, - { - "epoch": 0.01, - "learning_rate": 5.9690189328743544e-06, - "loss": 0.6697, - "step": 89 - }, - { - "epoch": 0.01, - "learning_rate": 5.968621739706077e-06, - "loss": 0.6156, - "step": 90 - }, - { - "epoch": 0.01, - "learning_rate": 5.9682245465378e-06, - "loss": 0.6301, - "step": 91 - }, - { - "epoch": 0.01, - "learning_rate": 5.967827353369523e-06, - "loss": 0.6121, - "step": 92 - }, - { - "epoch": 0.01, - "learning_rate": 5.967430160201245e-06, - "loss": 0.6177, - "step": 93 - }, - { - "epoch": 0.01, - "learning_rate": 5.967032967032967e-06, - "loss": 0.611, - "step": 94 - }, - { - "epoch": 0.01, - "learning_rate": 5.96663577386469e-06, - "loss": 0.6359, - "step": 95 - }, - { - "epoch": 0.01, - "learning_rate": 5.966238580696412e-06, - "loss": 0.6417, - "step": 96 - }, - { - "epoch": 0.01, - "learning_rate": 5.965841387528134e-06, - "loss": 0.6312, - "step": 97 - }, - { - "epoch": 0.01, - "learning_rate": 5.965444194359857e-06, - "loss": 0.6184, - "step": 98 - }, - { - "epoch": 0.01, - "learning_rate": 5.9650470011915796e-06, - "loss": 0.6724, - "step": 99 - }, - { - "epoch": 0.01, - "learning_rate": 5.964649808023302e-06, - "loss": 0.6833, - "step": 100 - }, - { - "epoch": 0.01, - "learning_rate": 5.964252614855025e-06, - "loss": 0.6433, - "step": 101 - }, - { - "epoch": 0.01, - "learning_rate": 5.963855421686747e-06, - "loss": 0.6766, - "step": 102 - 
}, - { - "epoch": 0.01, - "learning_rate": 5.96345822851847e-06, - "loss": 0.6527, - "step": 103 - }, - { - "epoch": 0.01, - "learning_rate": 5.963061035350192e-06, - "loss": 0.5982, - "step": 104 - }, - { - "epoch": 0.01, - "learning_rate": 5.962663842181914e-06, - "loss": 0.6749, - "step": 105 - }, - { - "epoch": 0.01, - "learning_rate": 5.962266649013637e-06, - "loss": 0.6494, - "step": 106 - }, - { - "epoch": 0.01, - "learning_rate": 5.9618694558453595e-06, - "loss": 0.6998, - "step": 107 - }, - { - "epoch": 0.01, - "learning_rate": 5.961472262677082e-06, - "loss": 0.6112, - "step": 108 - }, - { - "epoch": 0.01, - "learning_rate": 5.961075069508805e-06, - "loss": 0.624, - "step": 109 - }, - { - "epoch": 0.01, - "learning_rate": 5.960677876340528e-06, - "loss": 0.6329, - "step": 110 - }, - { - "epoch": 0.01, - "learning_rate": 5.96028068317225e-06, - "loss": 0.6491, - "step": 111 - }, - { - "epoch": 0.01, - "learning_rate": 5.959883490003972e-06, - "loss": 0.6672, - "step": 112 - }, - { - "epoch": 0.01, - "learning_rate": 5.959486296835694e-06, - "loss": 0.6279, - "step": 113 - }, - { - "epoch": 0.02, - "learning_rate": 5.959089103667417e-06, - "loss": 0.6479, - "step": 114 - }, - { - "epoch": 0.02, - "learning_rate": 5.9586919104991395e-06, - "loss": 0.6214, - "step": 115 - }, - { - "epoch": 0.02, - "learning_rate": 5.958294717330862e-06, - "loss": 0.6618, - "step": 116 - }, - { - "epoch": 0.02, - "learning_rate": 5.957897524162585e-06, - "loss": 0.6703, - "step": 117 - }, - { - "epoch": 0.02, - "learning_rate": 5.957500330994307e-06, - "loss": 0.6417, - "step": 118 - }, - { - "epoch": 0.02, - "learning_rate": 5.957103137826029e-06, - "loss": 0.631, - "step": 119 - }, - { - "epoch": 0.02, - "learning_rate": 5.956705944657752e-06, - "loss": 0.6169, - "step": 120 - }, - { - "epoch": 0.02, - "learning_rate": 5.956308751489475e-06, - "loss": 0.6521, - "step": 121 - }, - { - "epoch": 0.02, - "learning_rate": 5.955911558321197e-06, - "loss": 0.6635, - "step": 122 - }, - { - "epoch": 0.02, - "learning_rate": 5.955514365152919e-06, - "loss": 0.6496, - "step": 123 - }, - { - "epoch": 0.02, - "learning_rate": 5.955117171984642e-06, - "loss": 0.6431, - "step": 124 - }, - { - "epoch": 0.02, - "learning_rate": 5.954719978816365e-06, - "loss": 0.6246, - "step": 125 - }, - { - "epoch": 0.02, - "learning_rate": 5.954322785648087e-06, - "loss": 0.6557, - "step": 126 - }, - { - "epoch": 0.02, - "learning_rate": 5.953925592479809e-06, - "loss": 0.6082, - "step": 127 - }, - { - "epoch": 0.02, - "learning_rate": 5.953528399311532e-06, - "loss": 0.5941, - "step": 128 - }, - { - "epoch": 0.02, - "learning_rate": 5.953131206143255e-06, - "loss": 0.6566, - "step": 129 - }, - { - "epoch": 0.02, - "learning_rate": 5.952734012974977e-06, - "loss": 0.6243, - "step": 130 - }, - { - "epoch": 0.02, - "learning_rate": 5.952336819806699e-06, - "loss": 0.594, - "step": 131 - }, - { - "epoch": 0.02, - "learning_rate": 5.951939626638422e-06, - "loss": 0.68, - "step": 132 - }, - { - "epoch": 0.02, - "learning_rate": 5.9515424334701446e-06, - "loss": 0.6302, - "step": 133 - }, - { - "epoch": 0.02, - "learning_rate": 5.951145240301867e-06, - "loss": 0.6251, - "step": 134 - }, - { - "epoch": 0.02, - "learning_rate": 5.950748047133589e-06, - "loss": 0.6326, - "step": 135 - }, - { - "epoch": 0.02, - "learning_rate": 5.950350853965312e-06, - "loss": 0.6314, - "step": 136 - }, - { - "epoch": 0.02, - "learning_rate": 5.949953660797034e-06, - "loss": 0.6598, - "step": 137 - }, - { - "epoch": 0.02, - "learning_rate": 
5.949556467628757e-06, - "loss": 0.6583, - "step": 138 - }, - { - "epoch": 0.02, - "learning_rate": 5.949159274460479e-06, - "loss": 0.6162, - "step": 139 - }, - { - "epoch": 0.02, - "learning_rate": 5.948762081292202e-06, - "loss": 0.7042, - "step": 140 - }, - { - "epoch": 0.02, - "learning_rate": 5.9483648881239245e-06, - "loss": 0.6733, - "step": 141 - }, - { - "epoch": 0.02, - "learning_rate": 5.947967694955647e-06, - "loss": 0.6103, - "step": 142 - }, - { - "epoch": 0.02, - "learning_rate": 5.94757050178737e-06, - "loss": 0.6269, - "step": 143 - }, - { - "epoch": 0.02, - "learning_rate": 5.947173308619092e-06, - "loss": 0.663, - "step": 144 - }, - { - "epoch": 0.02, - "learning_rate": 5.946776115450814e-06, - "loss": 0.5794, - "step": 145 - }, - { - "epoch": 0.02, - "learning_rate": 5.946378922282537e-06, - "loss": 0.6868, - "step": 146 - }, - { - "epoch": 0.02, - "learning_rate": 5.945981729114259e-06, - "loss": 0.6064, - "step": 147 - }, - { - "epoch": 0.02, - "learning_rate": 5.945584535945982e-06, - "loss": 0.6519, - "step": 148 - }, - { - "epoch": 0.02, - "learning_rate": 5.9451873427777045e-06, - "loss": 0.655, - "step": 149 - }, - { - "epoch": 0.02, - "learning_rate": 5.944790149609427e-06, - "loss": 0.6617, - "step": 150 - } - ], - "logging_steps": 1, - "max_steps": 15116, - "num_train_epochs": 2, - "save_steps": 50, - "total_flos": 6.291064218451968e+17, - "trial_name": null, - "trial_params": null -} diff --git a/checkpoint-150/training_args.bin b/checkpoint-150/training_args.bin deleted file mode 100644 index 731dabe55350d521ab0dfde0b2f023771c347250..0000000000000000000000000000000000000000 --- a/checkpoint-150/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2f05be88d930176935da1678b48a8294634889bf7ae4f8bebdbaca140c2dac08 -size 5947 diff --git a/checkpoint-150/zero_to_fp32.py b/checkpoint-150/zero_to_fp32.py deleted file mode 100755 index c98caae31534368be22b67fc4ae906836c992a8d..0000000000000000000000000000000000000000 --- a/checkpoint-150/zero_to_fp32.py +++ /dev/null @@ -1,587 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets -# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in -# the future. Once extracted, the weights don't require DeepSpeed and can be used in any -# application. -# -# example: python zero_to_fp32.py . pytorch_model.bin - -import argparse -import torch -import glob -import math -import os -import re -from collections import OrderedDict -from dataclasses import dataclass - -# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with -# DeepSpeed data structures it has to be available in the current python environment. 
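# A concrete invocation sketch for this checkpoint (an editor-added assumption,
# not part of the original script header): run from a clone in which the ZeRO
# shards named by checkpoint-150/latest (global_step150) are still on disk; the
# output filename pytorch_model_fp32.bin is illustrative.
#
#   python checkpoint-150/zero_to_fp32.py checkpoint-150 pytorch_model_fp32.bin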
-from deepspeed.utils import logger -from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, - FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, - FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) - - -@dataclass -class zero_model_state: - buffers: dict() - param_shapes: dict() - shared_params: list - ds_version: int - frozen_param_shapes: dict() - frozen_param_fragments: dict() - - -debug = 0 - -# load to cpu -device = torch.device('cpu') - - -def atoi(text): - return int(text) if text.isdigit() else text - - -def natural_keys(text): - ''' - alist.sort(key=natural_keys) sorts in human order - http://nedbatchelder.com/blog/200712/human_sorting.html - (See Toothy's implementation in the comments) - ''' - return [atoi(c) for c in re.split(r'(\d+)', text)] - - -def get_model_state_file(checkpoint_dir, zero_stage): - if not os.path.isdir(checkpoint_dir): - raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") - - # there should be only one file - if zero_stage <= 2: - file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") - elif zero_stage == 3: - file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") - - if not os.path.exists(file): - raise FileNotFoundError(f"can't find model states file at '{file}'") - - return file - - -def get_checkpoint_files(checkpoint_dir, glob_pattern): - # XXX: need to test that this simple glob rule works for multi-node setup too - ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) - - if len(ckpt_files) == 0: - raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") - - return ckpt_files - - -def get_optim_files(checkpoint_dir): - return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") - - -def get_model_state_files(checkpoint_dir): - return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") - - -def parse_model_states(files): - zero_model_states = [] - for file in files: - state_dict = torch.load(file, map_location=device) - - if BUFFER_NAMES not in state_dict: - raise ValueError(f"{file} is not a model state checkpoint") - buffer_names = state_dict[BUFFER_NAMES] - if debug: - print("Found buffers:", buffer_names) - - # recover just the buffers while restoring them to fp32 if they were saved in fp16 - buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} - param_shapes = state_dict[PARAM_SHAPES] - - # collect parameters that are included in param_shapes - param_names = [] - for s in param_shapes: - for name in s.keys(): - param_names.append(name) - - # update with frozen parameters - frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) - if frozen_param_shapes is not None: - if debug: - print(f"Found frozen_param_shapes: {frozen_param_shapes}") - param_names += list(frozen_param_shapes.keys()) - - # handle shared params - shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] - - ds_version = state_dict.get(DS_VERSION, None) - - frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) - - z_model_state = zero_model_state(buffers=buffers, - param_shapes=param_shapes, - shared_params=shared_params, - ds_version=ds_version, - frozen_param_shapes=frozen_param_shapes, - frozen_param_fragments=frozen_param_fragments) - zero_model_states.append(z_model_state) - - return zero_model_states - - -def parse_optim_states(files, ds_checkpoint_dir): - - total_files = len(files) - 
state_dicts = [] - for f in files: - state_dict = torch.load(f, map_location=device) - # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights - # and also handle the case where it was already removed by another helper script - state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) - state_dicts.append(state_dict) - - if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: - raise ValueError(f"{files[0]} is not a zero checkpoint") - zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] - world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] - - # For ZeRO-2 each param group can have different partition_count as data parallelism for expert - # parameters can be different from data parallelism for non-expert parameters. So we can just - # use the max of the partition_count to get the dp world_size. - - if type(world_size) is list: - world_size = max(world_size) - - if world_size != total_files: - raise ValueError( - f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " - "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." - ) - - # the groups are named differently in each stage - if zero_stage <= 2: - fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS - elif zero_stage == 3: - fp32_groups_key = FP32_FLAT_GROUPS - else: - raise ValueError(f"unknown zero stage {zero_stage}") - - if zero_stage <= 2: - fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] - elif zero_stage == 3: - # if there is more than one param group, there will be multiple flattened tensors - one - # flattened tensor per group - for simplicity merge them into a single tensor - # - # XXX: could make the script more memory efficient for when there are multiple groups - it - # will require matching the sub-lists of param_shapes for each param group flattened tensor - - fp32_flat_groups = [ - torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts)) - ] - - return zero_stage, world_size, fp32_flat_groups - - -def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir): - """ - Returns fp32 state_dict reconstructed from ds checkpoint - - Args: - - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) - - """ - print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") - - optim_files = get_optim_files(ds_checkpoint_dir) - zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) - print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") - - model_files = get_model_state_files(ds_checkpoint_dir) - - zero_model_states = parse_model_states(model_files) - print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') - - if zero_stage <= 2: - return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states) - elif zero_stage == 3: - return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states) - - -def _zero2_merge_frozen_params(state_dict, zero_model_states): - if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: - return - - frozen_param_shapes = zero_model_states[0].frozen_param_shapes - frozen_param_fragments = zero_model_states[0].frozen_param_fragments - - if debug: - num_elem = 
sum(s.numel() for s in frozen_param_shapes.values()) - print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') - - wanted_params = len(frozen_param_shapes) - wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) - avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) - print(f'Frozen params: Have {avail_numel} numels to process.') - print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') - - total_params = 0 - total_numel = 0 - for name, shape in frozen_param_shapes.items(): - total_params += 1 - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - - state_dict[name] = frozen_param_fragments[name] - - if debug: - print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") - - print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") - - -def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): - param_shapes = zero_model_states[0].param_shapes - - # Reconstruction protocol: - # - # XXX: document this - - if debug: - for i in range(world_size): - for j in range(len(fp32_flat_groups[0])): - print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") - - # XXX: memory usage doubles here (zero2) - num_param_groups = len(fp32_flat_groups[0]) - merged_single_partition_of_fp32_groups = [] - for i in range(num_param_groups): - merged_partitions = [sd[i] for sd in fp32_flat_groups] - full_single_fp32_vector = torch.cat(merged_partitions, 0) - merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) - avail_numel = sum( - [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) - - if debug: - wanted_params = sum([len(shapes) for shapes in param_shapes]) - wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) - # not asserting if there is a mismatch due to possible padding - print(f"Have {avail_numel} numels to process.") - print(f"Need {wanted_numel} numels in {wanted_params} params.") - - # params - # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support - # out-of-core computing solution - total_numel = 0 - total_params = 0 - for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): - offset = 0 - avail_numel = full_single_fp32_vector.numel() - for name, shape in shapes.items(): - - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - total_params += 1 - - if debug: - print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") - state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) - offset += unpartitioned_numel - - # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and - # avail_numel can differ by anywhere between 0..2*world_size. 
Due to two unrelated complex - # paddings performed in the code it's almost impossible to predict the exact numbers w/o the - # live optimizer object, so we are checking that the numbers are within the right range - align_to = 2 * world_size - - def zero2_align(x): - return align_to * math.ceil(x / align_to) - - if debug: - print(f"original offset={offset}, avail_numel={avail_numel}") - - offset = zero2_align(offset) - avail_numel = zero2_align(avail_numel) - - if debug: - print(f"aligned offset={offset}, avail_numel={avail_numel}") - - # Sanity check - if offset != avail_numel: - raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") - - print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") - - -def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states): - state_dict = OrderedDict() - - # buffers - buffers = zero_model_states[0].buffers - state_dict.update(buffers) - if debug: - print(f"added {len(buffers)} buffers") - - _zero2_merge_frozen_params(state_dict, zero_model_states) - - _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) - - # recover shared parameters - for pair in zero_model_states[0].shared_params: - if pair[1] in state_dict: - state_dict[pair[0]] = state_dict[pair[1]] - - return state_dict - - -def zero3_partitioned_param_info(unpartitioned_numel, world_size): - remainder = unpartitioned_numel % world_size - padding_numel = (world_size - remainder) if remainder else 0 - partitioned_numel = math.ceil(unpartitioned_numel / world_size) - return partitioned_numel, padding_numel - - -def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): - if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: - return - - if debug: - for i in range(world_size): - num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) - print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') - - frozen_param_shapes = zero_model_states[0].frozen_param_shapes - wanted_params = len(frozen_param_shapes) - wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) - avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size - print(f'Frozen params: Have {avail_numel} numels to process.') - print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') - - total_params = 0 - total_numel = 0 - for name, shape in zero_model_states[0].frozen_param_shapes.items(): - total_params += 1 - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - - param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) - state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) - - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print( - f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") - - -def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): - param_shapes = zero_model_states[0].param_shapes - avail_numel = fp32_flat_groups[0].numel() * world_size - # Reconstruction protocol: For zero3 we need to zip the 
partitions together at boundary of each - # param, re-consolidating each param, while dealing with padding if any - - # merge list of dicts, preserving order - param_shapes = {k: v for d in param_shapes for k, v in d.items()} - - if debug: - for i in range(world_size): - print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") - - wanted_params = len(param_shapes) - wanted_numel = sum(shape.numel() for shape in param_shapes.values()) - # not asserting if there is a mismatch due to possible padding - avail_numel = fp32_flat_groups[0].numel() * world_size - print(f"Trainable params: Have {avail_numel} numels to process.") - print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") - - # params - # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support - # out-of-core computing solution - offset = 0 - total_numel = 0 - total_params = 0 - for name, shape in param_shapes.items(): - - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - total_params += 1 - - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print( - f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - # XXX: memory usage doubles here - state_dict[name] = torch.cat( - tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)), - 0).narrow(0, 0, unpartitioned_numel).view(shape) - offset += partitioned_numel - - offset *= world_size - - # Sanity check - if offset != avail_numel: - raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") - - print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") - - -def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states): - state_dict = OrderedDict() - - # buffers - buffers = zero_model_states[0].buffers - state_dict.update(buffers) - if debug: - print(f"added {len(buffers)} buffers") - - _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) - - _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) - - # recover shared parameters - for pair in zero_model_states[0].shared_params: - if pair[1] in state_dict: - state_dict[pair[0]] = state_dict[pair[1]] - - return state_dict - - -def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None): - """ - Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with - ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example - via a model hub. - - Args: - - ``checkpoint_dir``: path to the desired checkpoint folder - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` - - Returns: - - pytorch ``state_dict`` - - Note: this approach may not work if your application doesn't have sufficient free CPU memory and - you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with - the checkpoint. 
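- - As a rough, hedged estimate (fp32 stores 4 bytes per parameter), consolidating a 7B-parameter - model such as the one in this repository needs on the order of ``7e9 * 4 bytes ≈ 28 GB`` of free - CPU memory, plus working overhead.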
- - A typical usage might be :: - - from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint - # do the training and checkpoint saving - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu - model = model.cpu() # move to cpu - model.load_state_dict(state_dict) - # submit to model hub or save the model to share with others - - In this example the ``model`` will no longer be usable in the deepspeed context of the same - application, i.e. you will need to re-initialize the deepspeed engine, since - ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. - - If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. - - """ - if tag is None: - latest_path = os.path.join(checkpoint_dir, 'latest') - if os.path.isfile(latest_path): - with open(latest_path, 'r') as fd: - tag = fd.read().strip() - else: - raise ValueError(f"Unable to find 'latest' file at {latest_path}") - - ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) - - if not os.path.isdir(ds_checkpoint_dir): - raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") - - return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir) - - -def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None): - """ - Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be - loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. - - Args: - - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to read the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` - """ - - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) - print(f"Saving fp32 state dict to {output_file}") - torch.save(state_dict, output_file) - - -def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): - """ - 1. Move the provided model to CPU - 2. Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` - 3. Load it into the provided model - - Args: - - ``model``: the model object to update - - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to read the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` - - Returns: - - ``model``: the modified model - - Make sure you have plenty of CPU memory available before you call this function. If you don't - have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it - conveniently placed for you in the checkpoint folder. - - A typical usage might be :: - - from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint - model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) - # submit to model hub or save the model to share with others - - Note that once this has run, the ``model`` will no longer be usable in the deepspeed context - of the same application, i.e. you will need to re-initialize the deepspeed engine, since - ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
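- - If you then want to export the consolidated model in the Hugging Face format for sharing, a - minimal sketch might be (an illustration, not part of this script: it assumes ``transformers`` - is installed, ``trainer.model`` is a ``transformers.PreTrainedModel``, and ``output_dir`` is a - directory of your choosing) :: - - model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) - model.save_pretrained(output_dir) # writes the config and fp32 weights to output_dir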
- - """ - logger.info(f"Extracting fp32 weights") - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) - - logger.info(f"Overwriting model with fp32 weights") - model = model.cpu() - model.load_state_dict(state_dict, strict=False) - - return model - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser() - parser.add_argument("checkpoint_dir", - type=str, - help="path to the desired checkpoint folder, e.g., path/checkpoint-12") - parser.add_argument( - "output_file", - type=str, - help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)") - parser.add_argument("-t", - "--tag", - type=str, - default=None, - help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") - parser.add_argument("-d", "--debug", action='store_true', help="enable debug") - args = parser.parse_args() - - debug = args.debug - - convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag) diff --git a/checkpoint-200/config.json b/checkpoint-200/config.json deleted file mode 100644 index 82bacd02f94ab4a4cfdb7b12a0484fac8f301916..0000000000000000000000000000000000000000 --- a/checkpoint-200/config.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "_name_or_path": "mistralai/Mistral-7B-v0.1", - "architectures": [ - "MistralForCausalLM" - ], - "bos_token_id": 1, - "eos_token_id": 2, - "hidden_act": "silu", - "hidden_size": 4096, - "initializer_range": 0.02, - "intermediate_size": 14336, - "max_position_embeddings": 32768, - "model_type": "mistral", - "num_attention_heads": 32, - "num_hidden_layers": 32, - "num_key_value_heads": 8, - "rms_norm_eps": 1e-05, - "rope_theta": 10000.0, - "sliding_window": 4096, - "tie_word_embeddings": false, - "torch_dtype": "bfloat16", - "transformers_version": "4.34.0.dev0", - "use_cache": false, - "vocab_size": 32002 -} diff --git a/checkpoint-200/generation_config.json b/checkpoint-200/generation_config.json deleted file mode 100644 index 2c5f418036a121b3fd432d1bf2b3c5c9daf59fab..0000000000000000000000000000000000000000 --- a/checkpoint-200/generation_config.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "_from_model_config": true, - "bos_token_id": 1, - "eos_token_id": 2, - "transformers_version": "4.34.0.dev0" -} diff --git a/checkpoint-200/latest b/checkpoint-200/latest deleted file mode 100644 index 753e24e10f3a2489150f458205cf759fd8b6081f..0000000000000000000000000000000000000000 --- a/checkpoint-200/latest +++ /dev/null @@ -1 +0,0 @@ -global_step200 \ No newline at end of file diff --git a/checkpoint-200/pytorch_model-00001-of-00002.bin b/checkpoint-200/pytorch_model-00001-of-00002.bin deleted file mode 100644 index 53d0ed7838534f1a38ff5ed2e576180ed16a7519..0000000000000000000000000000000000000000 --- a/checkpoint-200/pytorch_model-00001-of-00002.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:903412ad8d63a3544a84531bed488838561c60f33953ec8821e76bb9806cdf31 -size 9943044428 diff --git a/checkpoint-200/pytorch_model-00002-of-00002.bin b/checkpoint-200/pytorch_model-00002-of-00002.bin deleted file mode 100644 index 83013b21614d33a616a27ba384d0f6a55c469355..0000000000000000000000000000000000000000 --- a/checkpoint-200/pytorch_model-00002-of-00002.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2ee97a78f24972026adf4f389f4fe546b265d44003a9d0533a43de09bc36f2fd -size 4540552031 diff --git a/checkpoint-200/pytorch_model.bin.index.json b/checkpoint-200/pytorch_model.bin.index.json deleted 
file mode 100644 index 53213fb82ddc02718be2ce686f00ba7fb0af95e7..0000000000000000000000000000000000000000 --- a/checkpoint-200/pytorch_model.bin.index.json +++ /dev/null @@ -1,298 +0,0 @@ -{ - "metadata": { - "total_size": 14483496960 - }, - "weight_map": { - "lm_head.weight": "pytorch_model-00002-of-00002.bin", - "model.embed_tokens.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.mlp.up_proj.weight": 
"pytorch_model-00001-of-00002.bin", - "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.mlp.gate_proj.weight": 
"pytorch_model-00001-of-00002.bin", - "model.layers.17.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.mlp.down_proj.weight": 
"pytorch_model-00001-of-00002.bin", - "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.22.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.23.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.input_layernorm.weight": 
"pytorch_model-00002-of-00002.bin", - "model.layers.26.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.self_attn.v_proj.weight": 
"pytorch_model-00001-of-00002.bin", - "model.layers.30.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", 
- "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.norm.weight": "pytorch_model-00002-of-00002.bin" - } -} diff --git a/checkpoint-200/rng_state_0.pth b/checkpoint-200/rng_state_0.pth deleted file mode 100644 index 24aba7bcc2a9fb783bd13a6a67b96e5ad055d89d..0000000000000000000000000000000000000000 --- a/checkpoint-200/rng_state_0.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1eafe3d5e0585dde8c5033613de99a5d4f23df4284a488f4007b3944580c0b97 -size 17655 diff --git a/checkpoint-200/rng_state_1.pth b/checkpoint-200/rng_state_1.pth deleted file mode 100644 index 6b2ef88173fde17f2b3e738a28446f89a0528a96..0000000000000000000000000000000000000000 --- a/checkpoint-200/rng_state_1.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7e34eb456d2d003a2839f2daa9425e99bdd79ed7e24a1de9fc7d5738476bfb4b -size 17655 diff --git a/checkpoint-200/rng_state_2.pth b/checkpoint-200/rng_state_2.pth deleted file mode 100644 index 7b118d52a3006aea6c44f23f94c5568d1fb0a2f3..0000000000000000000000000000000000000000 --- a/checkpoint-200/rng_state_2.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b374af4a2765d8771cee7a72921d3c2e438b9bee34f0b2d098ce6071afeb65e4 -size 17655 diff --git a/checkpoint-200/rng_state_3.pth b/checkpoint-200/rng_state_3.pth deleted 
file mode 100644 index 3f6fd9aa58eb1d5815ca991134531a3280601900..0000000000000000000000000000000000000000 --- a/checkpoint-200/rng_state_3.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5df75d8477fcc69c7abb03025313915ebfe3ac18c54a7c57aaa455c0099e13e5 -size 17655 diff --git a/checkpoint-200/trainer_state.json b/checkpoint-200/trainer_state.json deleted file mode 100644 index 5584be8c62df62d622a91aff21471107f042b883..0000000000000000000000000000000000000000 --- a/checkpoint-200/trainer_state.json +++ /dev/null @@ -1,1243 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 0.02646202699126753, - "eval_steps": 756, - "global_step": 200, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.0, - "learning_rate": 0.0, - "loss": 0.9197, - "step": 1 - }, - { - "epoch": 0.0, - "eval_loss": 1.4652303457260132, - "eval_runtime": 2.1726, - "eval_samples_per_second": 79.627, - "eval_steps_per_second": 3.682, - "step": 1 - }, - { - "epoch": 0.0, - "eval_bench_accuracy_agieval": 0.2711864406779661, - "eval_bench_accuracy_arc_challenge": 0.8703703703703703, - "eval_bench_accuracy_arc_easy": 0.9259259259259259, - "eval_bench_accuracy_bigbench": 0.36065573770491804, - "eval_bench_accuracy_boolq": 0.5740740740740741, - "eval_bench_accuracy_mmlu": 0.5185185185185185, - "eval_bench_accuracy_openbookqa": 0.1111111111111111, - "eval_bench_accuracy_truthful_qa": 0.3584905660377358, - "eval_bench_accuracy_winogrande": 0.4444444444444444, - "eval_bench_average_accuracy": 0.4927530209850072, - "eval_bench_loss": 2.6978388407144203, - "eval_bench_total_accuracy": 0.48893360160965793, - "step": 1 - }, - { - "epoch": 0.0, - "learning_rate": 6.000000000000001e-07, - "loss": 1.3426, - "step": 2 - }, - { - "epoch": 0.0, - "learning_rate": 1.2000000000000002e-06, - "loss": 1.5882, - "step": 3 - }, - { - "epoch": 0.0, - "learning_rate": 1.8e-06, - "loss": 0.8542, - "step": 4 - }, - { - "epoch": 0.0, - "learning_rate": 2.4000000000000003e-06, - "loss": 0.9629, - "step": 5 - }, - { - "epoch": 0.0, - "learning_rate": 3e-06, - "loss": 0.903, - "step": 6 - }, - { - "epoch": 0.0, - "learning_rate": 3.6e-06, - "loss": 0.909, - "step": 7 - }, - { - "epoch": 0.0, - "learning_rate": 4.2e-06, - "loss": 0.8666, - "step": 8 - }, - { - "epoch": 0.0, - "learning_rate": 4.800000000000001e-06, - "loss": 1.0108, - "step": 9 - }, - { - "epoch": 0.0, - "learning_rate": 5.4e-06, - "loss": 0.8958, - "step": 10 - }, - { - "epoch": 0.0, - "learning_rate": 6e-06, - "loss": 0.9348, - "step": 11 - }, - { - "epoch": 0.0, - "learning_rate": 5.999602806831722e-06, - "loss": 0.7832, - "step": 12 - }, - { - "epoch": 0.0, - "learning_rate": 5.999205613663445e-06, - "loss": 0.8083, - "step": 13 - }, - { - "epoch": 0.0, - "learning_rate": 5.9988084204951675e-06, - "loss": 0.8164, - "step": 14 - }, - { - "epoch": 0.0, - "learning_rate": 5.99841122732689e-06, - "loss": 0.7834, - "step": 15 - }, - { - "epoch": 0.0, - "learning_rate": 5.998014034158613e-06, - "loss": 0.8718, - "step": 16 - }, - { - "epoch": 0.0, - "learning_rate": 5.997616840990336e-06, - "loss": 0.84, - "step": 17 - }, - { - "epoch": 0.0, - "learning_rate": 5.997219647822058e-06, - "loss": 0.7397, - "step": 18 - }, - { - "epoch": 0.0, - "learning_rate": 5.99682245465378e-06, - "loss": 0.7445, - "step": 19 - }, - { - "epoch": 0.0, - "learning_rate": 5.996425261485502e-06, - "loss": 0.7898, - "step": 20 - }, - { - "epoch": 0.0, - "learning_rate": 
5.996028068317225e-06, - "loss": 0.7388, - "step": 21 - }, - { - "epoch": 0.0, - "learning_rate": 5.9956308751489475e-06, - "loss": 0.7296, - "step": 22 - }, - { - "epoch": 0.0, - "learning_rate": 5.99523368198067e-06, - "loss": 0.7993, - "step": 23 - }, - { - "epoch": 0.0, - "learning_rate": 5.994836488812393e-06, - "loss": 0.7188, - "step": 24 - }, - { - "epoch": 0.0, - "learning_rate": 5.994439295644115e-06, - "loss": 0.7473, - "step": 25 - }, - { - "epoch": 0.0, - "learning_rate": 5.994042102475838e-06, - "loss": 0.6997, - "step": 26 - }, - { - "epoch": 0.0, - "learning_rate": 5.99364490930756e-06, - "loss": 0.725, - "step": 27 - }, - { - "epoch": 0.0, - "learning_rate": 5.993247716139283e-06, - "loss": 0.7272, - "step": 28 - }, - { - "epoch": 0.0, - "learning_rate": 5.992850522971005e-06, - "loss": 0.7427, - "step": 29 - }, - { - "epoch": 0.0, - "learning_rate": 5.992453329802727e-06, - "loss": 0.7309, - "step": 30 - }, - { - "epoch": 0.0, - "learning_rate": 5.99205613663445e-06, - "loss": 0.6764, - "step": 31 - }, - { - "epoch": 0.0, - "learning_rate": 5.991658943466173e-06, - "loss": 0.7556, - "step": 32 - }, - { - "epoch": 0.0, - "learning_rate": 5.991261750297895e-06, - "loss": 0.7301, - "step": 33 - }, - { - "epoch": 0.0, - "learning_rate": 5.990864557129617e-06, - "loss": 0.6776, - "step": 34 - }, - { - "epoch": 0.0, - "learning_rate": 5.99046736396134e-06, - "loss": 0.6884, - "step": 35 - }, - { - "epoch": 0.0, - "learning_rate": 5.990070170793063e-06, - "loss": 0.7179, - "step": 36 - }, - { - "epoch": 0.0, - "learning_rate": 5.989672977624785e-06, - "loss": 0.6915, - "step": 37 - }, - { - "epoch": 0.01, - "learning_rate": 5.989275784456507e-06, - "loss": 0.7308, - "step": 38 - }, - { - "epoch": 0.01, - "learning_rate": 5.98887859128823e-06, - "loss": 0.6743, - "step": 39 - }, - { - "epoch": 0.01, - "learning_rate": 5.9884813981199526e-06, - "loss": 0.6604, - "step": 40 - }, - { - "epoch": 0.01, - "learning_rate": 5.988084204951675e-06, - "loss": 0.6609, - "step": 41 - }, - { - "epoch": 0.01, - "learning_rate": 5.987687011783397e-06, - "loss": 0.6524, - "step": 42 - }, - { - "epoch": 0.01, - "learning_rate": 5.98728981861512e-06, - "loss": 0.6386, - "step": 43 - }, - { - "epoch": 0.01, - "learning_rate": 5.986892625446843e-06, - "loss": 0.728, - "step": 44 - }, - { - "epoch": 0.01, - "learning_rate": 5.986495432278565e-06, - "loss": 0.6971, - "step": 45 - }, - { - "epoch": 0.01, - "learning_rate": 5.986098239110287e-06, - "loss": 0.6772, - "step": 46 - }, - { - "epoch": 0.01, - "learning_rate": 5.98570104594201e-06, - "loss": 0.6774, - "step": 47 - }, - { - "epoch": 0.01, - "learning_rate": 5.9853038527737325e-06, - "loss": 0.6868, - "step": 48 - }, - { - "epoch": 0.01, - "learning_rate": 5.984906659605455e-06, - "loss": 0.7169, - "step": 49 - }, - { - "epoch": 0.01, - "learning_rate": 5.984509466437178e-06, - "loss": 0.669, - "step": 50 - }, - { - "epoch": 0.01, - "learning_rate": 5.9841122732689e-06, - "loss": 0.7112, - "step": 51 - }, - { - "epoch": 0.01, - "learning_rate": 5.983715080100622e-06, - "loss": 0.6667, - "step": 52 - }, - { - "epoch": 0.01, - "learning_rate": 5.983317886932344e-06, - "loss": 0.6528, - "step": 53 - }, - { - "epoch": 0.01, - "learning_rate": 5.982920693764068e-06, - "loss": 0.6699, - "step": 54 - }, - { - "epoch": 0.01, - "learning_rate": 5.98252350059579e-06, - "loss": 0.6584, - "step": 55 - }, - { - "epoch": 0.01, - "learning_rate": 5.9821263074275125e-06, - "loss": 0.6328, - "step": 56 - }, - { - "epoch": 0.01, - "learning_rate": 
5.981729114259235e-06, - "loss": 0.6472, - "step": 57 - }, - { - "epoch": 0.01, - "learning_rate": 5.981331921090958e-06, - "loss": 0.6992, - "step": 58 - }, - { - "epoch": 0.01, - "learning_rate": 5.98093472792268e-06, - "loss": 0.6666, - "step": 59 - }, - { - "epoch": 0.01, - "learning_rate": 5.980537534754402e-06, - "loss": 0.6819, - "step": 60 - }, - { - "epoch": 0.01, - "learning_rate": 5.980140341586125e-06, - "loss": 0.705, - "step": 61 - }, - { - "epoch": 0.01, - "learning_rate": 5.979743148417847e-06, - "loss": 0.6871, - "step": 62 - }, - { - "epoch": 0.01, - "learning_rate": 5.97934595524957e-06, - "loss": 0.6998, - "step": 63 - }, - { - "epoch": 0.01, - "learning_rate": 5.978948762081292e-06, - "loss": 0.6081, - "step": 64 - }, - { - "epoch": 0.01, - "learning_rate": 5.9785515689130154e-06, - "loss": 0.6985, - "step": 65 - }, - { - "epoch": 0.01, - "learning_rate": 5.978154375744738e-06, - "loss": 0.6631, - "step": 66 - }, - { - "epoch": 0.01, - "learning_rate": 5.97775718257646e-06, - "loss": 0.6534, - "step": 67 - }, - { - "epoch": 0.01, - "learning_rate": 5.977359989408182e-06, - "loss": 0.6685, - "step": 68 - }, - { - "epoch": 0.01, - "learning_rate": 5.976962796239905e-06, - "loss": 0.6821, - "step": 69 - }, - { - "epoch": 0.01, - "learning_rate": 5.976565603071627e-06, - "loss": 0.6241, - "step": 70 - }, - { - "epoch": 0.01, - "learning_rate": 5.976168409903349e-06, - "loss": 0.6357, - "step": 71 - }, - { - "epoch": 0.01, - "learning_rate": 5.975771216735072e-06, - "loss": 0.6466, - "step": 72 - }, - { - "epoch": 0.01, - "learning_rate": 5.975374023566795e-06, - "loss": 0.6579, - "step": 73 - }, - { - "epoch": 0.01, - "learning_rate": 5.9749768303985176e-06, - "loss": 0.6298, - "step": 74 - }, - { - "epoch": 0.01, - "learning_rate": 5.97457963723024e-06, - "loss": 0.703, - "step": 75 - }, - { - "epoch": 0.01, - "learning_rate": 5.974182444061963e-06, - "loss": 0.6152, - "step": 76 - }, - { - "epoch": 0.01, - "learning_rate": 5.973785250893685e-06, - "loss": 0.6682, - "step": 77 - }, - { - "epoch": 0.01, - "learning_rate": 5.973388057725407e-06, - "loss": 0.6427, - "step": 78 - }, - { - "epoch": 0.01, - "learning_rate": 5.972990864557129e-06, - "loss": 0.6969, - "step": 79 - }, - { - "epoch": 0.01, - "learning_rate": 5.972593671388852e-06, - "loss": 0.6619, - "step": 80 - }, - { - "epoch": 0.01, - "learning_rate": 5.9721964782205745e-06, - "loss": 0.6332, - "step": 81 - }, - { - "epoch": 0.01, - "learning_rate": 5.9717992850522975e-06, - "loss": 0.6203, - "step": 82 - }, - { - "epoch": 0.01, - "learning_rate": 5.97140209188402e-06, - "loss": 0.6463, - "step": 83 - }, - { - "epoch": 0.01, - "learning_rate": 5.971004898715743e-06, - "loss": 0.6718, - "step": 84 - }, - { - "epoch": 0.01, - "learning_rate": 5.970607705547465e-06, - "loss": 0.6495, - "step": 85 - }, - { - "epoch": 0.01, - "learning_rate": 5.970210512379187e-06, - "loss": 0.5787, - "step": 86 - }, - { - "epoch": 0.01, - "learning_rate": 5.96981331921091e-06, - "loss": 0.6897, - "step": 87 - }, - { - "epoch": 0.01, - "learning_rate": 5.969416126042632e-06, - "loss": 0.6688, - "step": 88 - }, - { - "epoch": 0.01, - "learning_rate": 5.9690189328743544e-06, - "loss": 0.6697, - "step": 89 - }, - { - "epoch": 0.01, - "learning_rate": 5.968621739706077e-06, - "loss": 0.6156, - "step": 90 - }, - { - "epoch": 0.01, - "learning_rate": 5.9682245465378e-06, - "loss": 0.6301, - "step": 91 - }, - { - "epoch": 0.01, - "learning_rate": 5.967827353369523e-06, - "loss": 0.6121, - "step": 92 - }, - { - "epoch": 0.01, - 
"learning_rate": 5.967430160201245e-06, - "loss": 0.6177, - "step": 93 - }, - { - "epoch": 0.01, - "learning_rate": 5.967032967032967e-06, - "loss": 0.611, - "step": 94 - }, - { - "epoch": 0.01, - "learning_rate": 5.96663577386469e-06, - "loss": 0.6359, - "step": 95 - }, - { - "epoch": 0.01, - "learning_rate": 5.966238580696412e-06, - "loss": 0.6417, - "step": 96 - }, - { - "epoch": 0.01, - "learning_rate": 5.965841387528134e-06, - "loss": 0.6312, - "step": 97 - }, - { - "epoch": 0.01, - "learning_rate": 5.965444194359857e-06, - "loss": 0.6184, - "step": 98 - }, - { - "epoch": 0.01, - "learning_rate": 5.9650470011915796e-06, - "loss": 0.6724, - "step": 99 - }, - { - "epoch": 0.01, - "learning_rate": 5.964649808023302e-06, - "loss": 0.6833, - "step": 100 - }, - { - "epoch": 0.01, - "learning_rate": 5.964252614855025e-06, - "loss": 0.6433, - "step": 101 - }, - { - "epoch": 0.01, - "learning_rate": 5.963855421686747e-06, - "loss": 0.6766, - "step": 102 - }, - { - "epoch": 0.01, - "learning_rate": 5.96345822851847e-06, - "loss": 0.6527, - "step": 103 - }, - { - "epoch": 0.01, - "learning_rate": 5.963061035350192e-06, - "loss": 0.5982, - "step": 104 - }, - { - "epoch": 0.01, - "learning_rate": 5.962663842181914e-06, - "loss": 0.6749, - "step": 105 - }, - { - "epoch": 0.01, - "learning_rate": 5.962266649013637e-06, - "loss": 0.6494, - "step": 106 - }, - { - "epoch": 0.01, - "learning_rate": 5.9618694558453595e-06, - "loss": 0.6998, - "step": 107 - }, - { - "epoch": 0.01, - "learning_rate": 5.961472262677082e-06, - "loss": 0.6112, - "step": 108 - }, - { - "epoch": 0.01, - "learning_rate": 5.961075069508805e-06, - "loss": 0.624, - "step": 109 - }, - { - "epoch": 0.01, - "learning_rate": 5.960677876340528e-06, - "loss": 0.6329, - "step": 110 - }, - { - "epoch": 0.01, - "learning_rate": 5.96028068317225e-06, - "loss": 0.6491, - "step": 111 - }, - { - "epoch": 0.01, - "learning_rate": 5.959883490003972e-06, - "loss": 0.6672, - "step": 112 - }, - { - "epoch": 0.01, - "learning_rate": 5.959486296835694e-06, - "loss": 0.6279, - "step": 113 - }, - { - "epoch": 0.02, - "learning_rate": 5.959089103667417e-06, - "loss": 0.6479, - "step": 114 - }, - { - "epoch": 0.02, - "learning_rate": 5.9586919104991395e-06, - "loss": 0.6214, - "step": 115 - }, - { - "epoch": 0.02, - "learning_rate": 5.958294717330862e-06, - "loss": 0.6618, - "step": 116 - }, - { - "epoch": 0.02, - "learning_rate": 5.957897524162585e-06, - "loss": 0.6703, - "step": 117 - }, - { - "epoch": 0.02, - "learning_rate": 5.957500330994307e-06, - "loss": 0.6417, - "step": 118 - }, - { - "epoch": 0.02, - "learning_rate": 5.957103137826029e-06, - "loss": 0.631, - "step": 119 - }, - { - "epoch": 0.02, - "learning_rate": 5.956705944657752e-06, - "loss": 0.6169, - "step": 120 - }, - { - "epoch": 0.02, - "learning_rate": 5.956308751489475e-06, - "loss": 0.6521, - "step": 121 - }, - { - "epoch": 0.02, - "learning_rate": 5.955911558321197e-06, - "loss": 0.6635, - "step": 122 - }, - { - "epoch": 0.02, - "learning_rate": 5.955514365152919e-06, - "loss": 0.6496, - "step": 123 - }, - { - "epoch": 0.02, - "learning_rate": 5.955117171984642e-06, - "loss": 0.6431, - "step": 124 - }, - { - "epoch": 0.02, - "learning_rate": 5.954719978816365e-06, - "loss": 0.6246, - "step": 125 - }, - { - "epoch": 0.02, - "learning_rate": 5.954322785648087e-06, - "loss": 0.6557, - "step": 126 - }, - { - "epoch": 0.02, - "learning_rate": 5.953925592479809e-06, - "loss": 0.6082, - "step": 127 - }, - { - "epoch": 0.02, - "learning_rate": 5.953528399311532e-06, - "loss": 0.5941, - 
"step": 128 - }, - { - "epoch": 0.02, - "learning_rate": 5.953131206143255e-06, - "loss": 0.6566, - "step": 129 - }, - { - "epoch": 0.02, - "learning_rate": 5.952734012974977e-06, - "loss": 0.6243, - "step": 130 - }, - { - "epoch": 0.02, - "learning_rate": 5.952336819806699e-06, - "loss": 0.594, - "step": 131 - }, - { - "epoch": 0.02, - "learning_rate": 5.951939626638422e-06, - "loss": 0.68, - "step": 132 - }, - { - "epoch": 0.02, - "learning_rate": 5.9515424334701446e-06, - "loss": 0.6302, - "step": 133 - }, - { - "epoch": 0.02, - "learning_rate": 5.951145240301867e-06, - "loss": 0.6251, - "step": 134 - }, - { - "epoch": 0.02, - "learning_rate": 5.950748047133589e-06, - "loss": 0.6326, - "step": 135 - }, - { - "epoch": 0.02, - "learning_rate": 5.950350853965312e-06, - "loss": 0.6314, - "step": 136 - }, - { - "epoch": 0.02, - "learning_rate": 5.949953660797034e-06, - "loss": 0.6598, - "step": 137 - }, - { - "epoch": 0.02, - "learning_rate": 5.949556467628757e-06, - "loss": 0.6583, - "step": 138 - }, - { - "epoch": 0.02, - "learning_rate": 5.949159274460479e-06, - "loss": 0.6162, - "step": 139 - }, - { - "epoch": 0.02, - "learning_rate": 5.948762081292202e-06, - "loss": 0.7042, - "step": 140 - }, - { - "epoch": 0.02, - "learning_rate": 5.9483648881239245e-06, - "loss": 0.6733, - "step": 141 - }, - { - "epoch": 0.02, - "learning_rate": 5.947967694955647e-06, - "loss": 0.6103, - "step": 142 - }, - { - "epoch": 0.02, - "learning_rate": 5.94757050178737e-06, - "loss": 0.6269, - "step": 143 - }, - { - "epoch": 0.02, - "learning_rate": 5.947173308619092e-06, - "loss": 0.663, - "step": 144 - }, - { - "epoch": 0.02, - "learning_rate": 5.946776115450814e-06, - "loss": 0.5794, - "step": 145 - }, - { - "epoch": 0.02, - "learning_rate": 5.946378922282537e-06, - "loss": 0.6868, - "step": 146 - }, - { - "epoch": 0.02, - "learning_rate": 5.945981729114259e-06, - "loss": 0.6064, - "step": 147 - }, - { - "epoch": 0.02, - "learning_rate": 5.945584535945982e-06, - "loss": 0.6519, - "step": 148 - }, - { - "epoch": 0.02, - "learning_rate": 5.9451873427777045e-06, - "loss": 0.655, - "step": 149 - }, - { - "epoch": 0.02, - "learning_rate": 5.944790149609427e-06, - "loss": 0.6617, - "step": 150 - }, - { - "epoch": 0.02, - "learning_rate": 5.94439295644115e-06, - "loss": 0.627, - "step": 151 - }, - { - "epoch": 0.02, - "learning_rate": 5.943995763272872e-06, - "loss": 0.5837, - "step": 152 - }, - { - "epoch": 0.02, - "learning_rate": 5.943598570104594e-06, - "loss": 0.6201, - "step": 153 - }, - { - "epoch": 0.02, - "learning_rate": 5.943201376936317e-06, - "loss": 0.6291, - "step": 154 - }, - { - "epoch": 0.02, - "learning_rate": 5.942804183768039e-06, - "loss": 0.6061, - "step": 155 - }, - { - "epoch": 0.02, - "learning_rate": 5.942406990599761e-06, - "loss": 0.624, - "step": 156 - }, - { - "epoch": 0.02, - "learning_rate": 5.942009797431484e-06, - "loss": 0.6418, - "step": 157 - }, - { - "epoch": 0.02, - "learning_rate": 5.941612604263207e-06, - "loss": 0.5858, - "step": 158 - }, - { - "epoch": 0.02, - "learning_rate": 5.94121541109493e-06, - "loss": 0.6407, - "step": 159 - }, - { - "epoch": 0.02, - "learning_rate": 5.940818217926652e-06, - "loss": 0.6222, - "step": 160 - }, - { - "epoch": 0.02, - "learning_rate": 5.940421024758374e-06, - "loss": 0.5938, - "step": 161 - }, - { - "epoch": 0.02, - "learning_rate": 5.940023831590097e-06, - "loss": 0.6157, - "step": 162 - }, - { - "epoch": 0.02, - "learning_rate": 5.939626638421819e-06, - "loss": 0.5989, - "step": 163 - }, - { - "epoch": 0.02, - "learning_rate": 
5.939229445253541e-06, - "loss": 0.7056, - "step": 164 - }, - { - "epoch": 0.02, - "learning_rate": 5.938832252085264e-06, - "loss": 0.6606, - "step": 165 - }, - { - "epoch": 0.02, - "learning_rate": 5.9384350589169865e-06, - "loss": 0.6303, - "step": 166 - }, - { - "epoch": 0.02, - "learning_rate": 5.9380378657487095e-06, - "loss": 0.6332, - "step": 167 - }, - { - "epoch": 0.02, - "learning_rate": 5.937640672580432e-06, - "loss": 0.6197, - "step": 168 - }, - { - "epoch": 0.02, - "learning_rate": 5.937243479412155e-06, - "loss": 0.6318, - "step": 169 - }, - { - "epoch": 0.02, - "learning_rate": 5.936846286243877e-06, - "loss": 0.6598, - "step": 170 - }, - { - "epoch": 0.02, - "learning_rate": 5.936449093075599e-06, - "loss": 0.662, - "step": 171 - }, - { - "epoch": 0.02, - "learning_rate": 5.936051899907321e-06, - "loss": 0.6018, - "step": 172 - }, - { - "epoch": 0.02, - "learning_rate": 5.935654706739044e-06, - "loss": 0.6955, - "step": 173 - }, - { - "epoch": 0.02, - "learning_rate": 5.9352575135707665e-06, - "loss": 0.6283, - "step": 174 - }, - { - "epoch": 0.02, - "learning_rate": 5.934860320402489e-06, - "loss": 0.6829, - "step": 175 - }, - { - "epoch": 0.02, - "learning_rate": 5.934463127234212e-06, - "loss": 0.5985, - "step": 176 - }, - { - "epoch": 0.02, - "learning_rate": 5.934065934065935e-06, - "loss": 0.6385, - "step": 177 - }, - { - "epoch": 0.02, - "learning_rate": 5.933668740897657e-06, - "loss": 0.6326, - "step": 178 - }, - { - "epoch": 0.02, - "learning_rate": 5.933271547729379e-06, - "loss": 0.639, - "step": 179 - }, - { - "epoch": 0.02, - "learning_rate": 5.932874354561102e-06, - "loss": 0.6084, - "step": 180 - }, - { - "epoch": 0.02, - "learning_rate": 5.932477161392824e-06, - "loss": 0.6549, - "step": 181 - }, - { - "epoch": 0.02, - "learning_rate": 5.932079968224546e-06, - "loss": 0.6728, - "step": 182 - }, - { - "epoch": 0.02, - "learning_rate": 5.931682775056269e-06, - "loss": 0.6351, - "step": 183 - }, - { - "epoch": 0.02, - "learning_rate": 5.931285581887992e-06, - "loss": 0.6375, - "step": 184 - }, - { - "epoch": 0.02, - "learning_rate": 5.930888388719714e-06, - "loss": 0.6814, - "step": 185 - }, - { - "epoch": 0.02, - "learning_rate": 5.930491195551437e-06, - "loss": 0.5968, - "step": 186 - }, - { - "epoch": 0.02, - "learning_rate": 5.930094002383159e-06, - "loss": 0.6053, - "step": 187 - }, - { - "epoch": 0.02, - "learning_rate": 5.929696809214882e-06, - "loss": 0.6468, - "step": 188 - }, - { - "epoch": 0.03, - "learning_rate": 5.929299616046604e-06, - "loss": 0.6407, - "step": 189 - }, - { - "epoch": 0.03, - "learning_rate": 5.928902422878326e-06, - "loss": 0.6996, - "step": 190 - }, - { - "epoch": 0.03, - "learning_rate": 5.928505229710049e-06, - "loss": 0.6158, - "step": 191 - }, - { - "epoch": 0.03, - "learning_rate": 5.9281080365417716e-06, - "loss": 0.6128, - "step": 192 - }, - { - "epoch": 0.03, - "learning_rate": 5.927710843373494e-06, - "loss": 0.6558, - "step": 193 - }, - { - "epoch": 0.03, - "learning_rate": 5.927313650205216e-06, - "loss": 0.6726, - "step": 194 - }, - { - "epoch": 0.03, - "learning_rate": 5.92691645703694e-06, - "loss": 0.6292, - "step": 195 - }, - { - "epoch": 0.03, - "learning_rate": 5.926519263868662e-06, - "loss": 0.6004, - "step": 196 - }, - { - "epoch": 0.03, - "learning_rate": 5.926122070700384e-06, - "loss": 0.599, - "step": 197 - }, - { - "epoch": 0.03, - "learning_rate": 5.925724877532106e-06, - "loss": 0.6374, - "step": 198 - }, - { - "epoch": 0.03, - "learning_rate": 5.925327684363829e-06, - "loss": 0.6472, - "step": 199 
- }, - { - "epoch": 0.03, - "learning_rate": 5.9249304911955515e-06, - "loss": 0.594, - "step": 200 - } - ], - "logging_steps": 1, - "max_steps": 15116, - "num_train_epochs": 2, - "save_steps": 50, - "total_flos": 8.388085624602624e+17, - "trial_name": null, - "trial_params": null -} diff --git a/checkpoint-200/training_args.bin b/checkpoint-200/training_args.bin deleted file mode 100644 index 731dabe55350d521ab0dfde0b2f023771c347250..0000000000000000000000000000000000000000 --- a/checkpoint-200/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2f05be88d930176935da1678b48a8294634889bf7ae4f8bebdbaca140c2dac08 -size 5947 diff --git a/checkpoint-200/zero_to_fp32.py b/checkpoint-200/zero_to_fp32.py deleted file mode 100755 index c98caae31534368be22b67fc4ae906836c992a8d..0000000000000000000000000000000000000000 --- a/checkpoint-200/zero_to_fp32.py +++ /dev/null @@ -1,587 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets -# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in -# the future. Once extracted, the weights don't require DeepSpeed and can be used in any -# application. -# -# example: python zero_to_fp32.py . pytorch_model.bin - -import argparse -import torch -import glob -import math -import os -import re -from collections import OrderedDict -from dataclasses import dataclass - -# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with -# DeepSpeed data structures it has to be available in the current python environment. -from deepspeed.utils import logger -from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, - FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, - FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) - - -@dataclass -class zero_model_state: - buffers: dict() - param_shapes: dict() - shared_params: list - ds_version: int - frozen_param_shapes: dict() - frozen_param_fragments: dict() - - -debug = 0 - -# load to cpu -device = torch.device('cpu') - - -def atoi(text): - return int(text) if text.isdigit() else text - - -def natural_keys(text): - ''' - alist.sort(key=natural_keys) sorts in human order - http://nedbatchelder.com/blog/200712/human_sorting.html - (See Toothy's implementation in the comments) - ''' - return [atoi(c) for c in re.split(r'(\d+)', text)] - - -def get_model_state_file(checkpoint_dir, zero_stage): - if not os.path.isdir(checkpoint_dir): - raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") - - # there should be only one file - if zero_stage <= 2: - file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") - elif zero_stage == 3: - file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") - - if not os.path.exists(file): - raise FileNotFoundError(f"can't find model states file at '{file}'") - - return file - - -def get_checkpoint_files(checkpoint_dir, glob_pattern): - # XXX: need to test that this simple glob rule works for multi-node setup too - ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) - - if len(ckpt_files) == 0: - raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") - - return ckpt_files - - -def 
get_optim_files(checkpoint_dir): - return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") - - -def get_model_state_files(checkpoint_dir): - return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") - - -def parse_model_states(files): - zero_model_states = [] - for file in files: - state_dict = torch.load(file, map_location=device) - - if BUFFER_NAMES not in state_dict: - raise ValueError(f"{file} is not a model state checkpoint") - buffer_names = state_dict[BUFFER_NAMES] - if debug: - print("Found buffers:", buffer_names) - - # recover just the buffers while restoring them to fp32 if they were saved in fp16 - buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} - param_shapes = state_dict[PARAM_SHAPES] - - # collect parameters that are included in param_shapes - param_names = [] - for s in param_shapes: - for name in s.keys(): - param_names.append(name) - - # update with frozen parameters - frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) - if frozen_param_shapes is not None: - if debug: - print(f"Found frozen_param_shapes: {frozen_param_shapes}") - param_names += list(frozen_param_shapes.keys()) - - # handle shared params - shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] - - ds_version = state_dict.get(DS_VERSION, None) - - frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) - - z_model_state = zero_model_state(buffers=buffers, - param_shapes=param_shapes, - shared_params=shared_params, - ds_version=ds_version, - frozen_param_shapes=frozen_param_shapes, - frozen_param_fragments=frozen_param_fragments) - zero_model_states.append(z_model_state) - - return zero_model_states - - -def parse_optim_states(files, ds_checkpoint_dir): - - total_files = len(files) - state_dicts = [] - for f in files: - state_dict = torch.load(f, map_location=device) - # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights - # and also handle the case where it was already removed by another helper script - state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) - state_dicts.append(state_dict) - - if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: - raise ValueError(f"{files[0]} is not a zero checkpoint") - zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] - world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] - - # For ZeRO-2 each param group can have different partition_count as data parallelism for expert - # parameters can be different from data parallelism for non-expert parameters. So we can just - # use the max of the partition_count to get the dp world_size. - - if type(world_size) is list: - world_size = max(world_size) - - if world_size != total_files: - raise ValueError( - f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " - "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." 
- ) - - # the groups are named differently in each stage - if zero_stage <= 2: - fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS - elif zero_stage == 3: - fp32_groups_key = FP32_FLAT_GROUPS - else: - raise ValueError(f"unknown zero stage {zero_stage}") - - if zero_stage <= 2: - fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] - elif zero_stage == 3: - # if there is more than one param group, there will be multiple flattened tensors - one - # flattened tensor per group - for simplicity merge them into a single tensor - # - # XXX: could make the script more memory efficient for when there are multiple groups - it - # will require matching the sub-lists of param_shapes for each param group flattened tensor - - fp32_flat_groups = [ - torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts)) - ] - - return zero_stage, world_size, fp32_flat_groups - - -def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir): - """ - Returns fp32 state_dict reconstructed from ds checkpoint - - Args: - - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) - - """ - print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") - - optim_files = get_optim_files(ds_checkpoint_dir) - zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) - print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") - - model_files = get_model_state_files(ds_checkpoint_dir) - - zero_model_states = parse_model_states(model_files) - print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') - - if zero_stage <= 2: - return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states) - elif zero_stage == 3: - return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states) - - -def _zero2_merge_frozen_params(state_dict, zero_model_states): - if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: - return - - frozen_param_shapes = zero_model_states[0].frozen_param_shapes - frozen_param_fragments = zero_model_states[0].frozen_param_fragments - - if debug: - num_elem = sum(s.numel() for s in frozen_param_shapes.values()) - print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') - - wanted_params = len(frozen_param_shapes) - wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) - avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) - print(f'Frozen params: Have {avail_numel} numels to process.') - print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') - - total_params = 0 - total_numel = 0 - for name, shape in frozen_param_shapes.items(): - total_params += 1 - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - - state_dict[name] = frozen_param_fragments[name] - - if debug: - print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") - - print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") - - -def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): - param_shapes = zero_model_states[0].param_shapes - - # Reconstruction protocol: - # - # XXX: document this - - if debug: - for i in range(world_size): - for j in range(len(fp32_flat_groups[0])): - 
print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") - - # XXX: memory usage doubles here (zero2) - num_param_groups = len(fp32_flat_groups[0]) - merged_single_partition_of_fp32_groups = [] - for i in range(num_param_groups): - merged_partitions = [sd[i] for sd in fp32_flat_groups] - full_single_fp32_vector = torch.cat(merged_partitions, 0) - merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) - avail_numel = sum( - [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) - - if debug: - wanted_params = sum([len(shapes) for shapes in param_shapes]) - wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) - # not asserting if there is a mismatch due to possible padding - print(f"Have {avail_numel} numels to process.") - print(f"Need {wanted_numel} numels in {wanted_params} params.") - - # params - # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support - # out-of-core computing solution - total_numel = 0 - total_params = 0 - for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): - offset = 0 - avail_numel = full_single_fp32_vector.numel() - for name, shape in shapes.items(): - - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - total_params += 1 - - if debug: - print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") - state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) - offset += unpartitioned_numel - - # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and - # avail_numel can differ by anywhere between 0..2*world_size. 
Due to two unrelated complex - # paddings performed in the code it's almost impossible to predict the exact numbers w/o the - # live optimizer object, so we are checking that the numbers are within the right range - align_to = 2 * world_size - - def zero2_align(x): - return align_to * math.ceil(x / align_to) - - if debug: - print(f"original offset={offset}, avail_numel={avail_numel}") - - offset = zero2_align(offset) - avail_numel = zero2_align(avail_numel) - - if debug: - print(f"aligned offset={offset}, avail_numel={avail_numel}") - - # Sanity check - if offset != avail_numel: - raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") - - print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") - - -def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states): - state_dict = OrderedDict() - - # buffers - buffers = zero_model_states[0].buffers - state_dict.update(buffers) - if debug: - print(f"added {len(buffers)} buffers") - - _zero2_merge_frozen_params(state_dict, zero_model_states) - - _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) - - # recover shared parameters - for pair in zero_model_states[0].shared_params: - if pair[1] in state_dict: - state_dict[pair[0]] = state_dict[pair[1]] - - return state_dict - - -def zero3_partitioned_param_info(unpartitioned_numel, world_size): - remainder = unpartitioned_numel % world_size - padding_numel = (world_size - remainder) if remainder else 0 - partitioned_numel = math.ceil(unpartitioned_numel / world_size) - return partitioned_numel, padding_numel - - -def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): - if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: - return - - if debug: - for i in range(world_size): - num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) - print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') - - frozen_param_shapes = zero_model_states[0].frozen_param_shapes - wanted_params = len(frozen_param_shapes) - wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) - avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size - print(f'Frozen params: Have {avail_numel} numels to process.') - print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') - - total_params = 0 - total_numel = 0 - for name, shape in zero_model_states[0].frozen_param_shapes.items(): - total_params += 1 - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - - param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) - state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) - - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print( - f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") - - -def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): - param_shapes = zero_model_states[0].param_shapes - avail_numel = fp32_flat_groups[0].numel() * world_size - # Reconstruction protocol: For zero3 we need to zip the 
partitions together at boundary of each - # param, re-consolidating each param, while dealing with padding if any - - # merge list of dicts, preserving order - param_shapes = {k: v for d in param_shapes for k, v in d.items()} - - if debug: - for i in range(world_size): - print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") - - wanted_params = len(param_shapes) - wanted_numel = sum(shape.numel() for shape in param_shapes.values()) - # not asserting if there is a mismatch due to possible padding - avail_numel = fp32_flat_groups[0].numel() * world_size - print(f"Trainable params: Have {avail_numel} numels to process.") - print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") - - # params - # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support - # out-of-core computing solution - offset = 0 - total_numel = 0 - total_params = 0 - for name, shape in param_shapes.items(): - - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - total_params += 1 - - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print( - f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - # XXX: memory usage doubles here - state_dict[name] = torch.cat( - tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)), - 0).narrow(0, 0, unpartitioned_numel).view(shape) - offset += partitioned_numel - - offset *= world_size - - # Sanity check - if offset != avail_numel: - raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") - - print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") - - -def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states): - state_dict = OrderedDict() - - # buffers - buffers = zero_model_states[0].buffers - state_dict.update(buffers) - if debug: - print(f"added {len(buffers)} buffers") - - _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) - - _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) - - # recover shared parameters - for pair in zero_model_states[0].shared_params: - if pair[1] in state_dict: - state_dict[pair[0]] = state_dict[pair[1]] - - return state_dict - - -def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None): - """ - Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with - ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example - via a model hub. - - Args: - - ``checkpoint_dir``: path to the desired checkpoint folder - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` - - Returns: - - pytorch ``state_dict`` - - Note: this approach may not work if your application doesn't have sufficient free CPU memory and - you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with - the checkpoint. 
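(Editorial aside, not part of the deleted script: the per-rank bookkeeping used by the ZeRO-3 merge above reduces to one line of arithmetic. A minimal sketch of the zero3_partitioned_param_info logic with a worked example; the parameter sizes are illustrative only.)

    import math

    def zero3_partition_info(unpartitioned_numel, world_size):
        # Each rank stores ceil(numel / world_size) elements; when numel is
        # not divisible by world_size, the trailing slots are zero padding.
        remainder = unpartitioned_numel % world_size
        padding_numel = (world_size - remainder) if remainder else 0
        partitioned_numel = math.ceil(unpartitioned_numel / world_size)
        return partitioned_numel, padding_numel

    # A 10-element parameter sharded over 4 ranks: 3 elements per rank
    # (12 slots in total), 2 of which are padding that the merge discards
    # again via narrow(0, 0, unpartitioned_numel).
    assert zero3_partition_info(10, 4) == (3, 2)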
- - A typical usage might be :: - - from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint - # do the training and checkpoint saving - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu - model = model.cpu() # move to cpu - model.load_state_dict(state_dict) - # submit to model hub or save the model to share with others - - In this example the ``model`` will no longer be usable in the deepspeed context of the same - application. i.e. you will need to re-initialize the deepspeed engine, since - ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. - - If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. - - """ - if tag is None: - latest_path = os.path.join(checkpoint_dir, 'latest') - if os.path.isfile(latest_path): - with open(latest_path, 'r') as fd: - tag = fd.read().strip() - else: - raise ValueError(f"Unable to find 'latest' file at {latest_path}") - - ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) - - if not os.path.isdir(ds_checkpoint_dir): - raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") - - return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir) - - -def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None): - """ - Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be - loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. - - Args: - - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` - """ - - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) - print(f"Saving fp32 state dict to {output_file}") - torch.save(state_dict, output_file) - - -def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): - """ - 1. Put the provided model to cpu - 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` - 3. Load it into the provided model - - Args: - - ``model``: the model object to update - - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` - - Returns: - - ``model`: modified model - - Make sure you have plenty of CPU memory available before you call this function. If you don't - have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it - conveniently placed for you in the checkpoint folder. - - A typical usage might be :: - - from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint - model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) - # submit to model hub or save the model to share with others - - Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context - of the same application. i.e. you will need to re-initialize the deepspeed engine, since - ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. 
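(Editorial aside, not part of the deleted script: the offline route that both docstrings point to can also be driven from Python rather than the CLI. A minimal sketch, assuming a checkpoint folder such as checkpoint-250 containing a tag directory like global_step250; the paths and the final model object are hypothetical.)

    import torch
    from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

    # Consolidate the ZeRO shards into a single fp32 state_dict file on disk.
    convert_zero_checkpoint_to_fp32_state_dict("checkpoint-250", "pytorch_model_fp32.bin")

    # The saved file then loads with plain torch, no DeepSpeed required:
    state_dict = torch.load("pytorch_model_fp32.bin", map_location="cpu")
    # model.load_state_dict(state_dict)  # 'model' is a compatible nn.Module (hypothetical)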
- - """ - logger.info(f"Extracting fp32 weights") - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) - - logger.info(f"Overwriting model with fp32 weights") - model = model.cpu() - model.load_state_dict(state_dict, strict=False) - - return model - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser() - parser.add_argument("checkpoint_dir", - type=str, - help="path to the desired checkpoint folder, e.g., path/checkpoint-12") - parser.add_argument( - "output_file", - type=str, - help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)") - parser.add_argument("-t", - "--tag", - type=str, - default=None, - help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") - parser.add_argument("-d", "--debug", action='store_true', help="enable debug") - args = parser.parse_args() - - debug = args.debug - - convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag) diff --git a/checkpoint-250/config.json b/checkpoint-250/config.json deleted file mode 100644 index 82bacd02f94ab4a4cfdb7b12a0484fac8f301916..0000000000000000000000000000000000000000 --- a/checkpoint-250/config.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "_name_or_path": "mistralai/Mistral-7B-v0.1", - "architectures": [ - "MistralForCausalLM" - ], - "bos_token_id": 1, - "eos_token_id": 2, - "hidden_act": "silu", - "hidden_size": 4096, - "initializer_range": 0.02, - "intermediate_size": 14336, - "max_position_embeddings": 32768, - "model_type": "mistral", - "num_attention_heads": 32, - "num_hidden_layers": 32, - "num_key_value_heads": 8, - "rms_norm_eps": 1e-05, - "rope_theta": 10000.0, - "sliding_window": 4096, - "tie_word_embeddings": false, - "torch_dtype": "bfloat16", - "transformers_version": "4.34.0.dev0", - "use_cache": false, - "vocab_size": 32002 -} diff --git a/checkpoint-250/generation_config.json b/checkpoint-250/generation_config.json deleted file mode 100644 index 2c5f418036a121b3fd432d1bf2b3c5c9daf59fab..0000000000000000000000000000000000000000 --- a/checkpoint-250/generation_config.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "_from_model_config": true, - "bos_token_id": 1, - "eos_token_id": 2, - "transformers_version": "4.34.0.dev0" -} diff --git a/checkpoint-250/latest b/checkpoint-250/latest deleted file mode 100644 index 87449ff1a854ba4a77ea33fbc24adaed3311d6b1..0000000000000000000000000000000000000000 --- a/checkpoint-250/latest +++ /dev/null @@ -1 +0,0 @@ -global_step250 \ No newline at end of file diff --git a/checkpoint-250/pytorch_model-00001-of-00002.bin b/checkpoint-250/pytorch_model-00001-of-00002.bin deleted file mode 100644 index c5b88b0093e91362eb3d554490f9d4e7d9c078e1..0000000000000000000000000000000000000000 --- a/checkpoint-250/pytorch_model-00001-of-00002.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0b50d9439b999b9fd5b6c2e695a1483624a4c6c6bcee46f35f79806dde564275 -size 9943044428 diff --git a/checkpoint-250/pytorch_model-00002-of-00002.bin b/checkpoint-250/pytorch_model-00002-of-00002.bin deleted file mode 100644 index d82cccbceb6c3792ef28e09867f6b37bb9767afd..0000000000000000000000000000000000000000 --- a/checkpoint-250/pytorch_model-00002-of-00002.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2a838f1a551e9ca10152f3cdb899a08d47a341ddb20b01839dc38a3eb6dac268 -size 4540552031 diff --git a/checkpoint-250/pytorch_model.bin.index.json b/checkpoint-250/pytorch_model.bin.index.json deleted 
file mode 100644 index 53213fb82ddc02718be2ce686f00ba7fb0af95e7..0000000000000000000000000000000000000000 --- a/checkpoint-250/pytorch_model.bin.index.json +++ /dev/null @@ -1,298 +0,0 @@ -{ - "metadata": { - "total_size": 14483496960 - }, - "weight_map": { - "lm_head.weight": "pytorch_model-00002-of-00002.bin", - "model.embed_tokens.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.mlp.up_proj.weight": 
"pytorch_model-00001-of-00002.bin", - "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.mlp.gate_proj.weight": 
"pytorch_model-00001-of-00002.bin", - "model.layers.17.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.mlp.down_proj.weight": 
"pytorch_model-00001-of-00002.bin", - "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.22.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.23.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.input_layernorm.weight": 
"pytorch_model-00002-of-00002.bin", - "model.layers.26.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.self_attn.v_proj.weight": 
"pytorch_model-00001-of-00002.bin", - "model.layers.30.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", 
- "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.norm.weight": "pytorch_model-00002-of-00002.bin" - } -} diff --git a/checkpoint-250/rng_state_0.pth b/checkpoint-250/rng_state_0.pth deleted file mode 100644 index 24aba7bcc2a9fb783bd13a6a67b96e5ad055d89d..0000000000000000000000000000000000000000 --- a/checkpoint-250/rng_state_0.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1eafe3d5e0585dde8c5033613de99a5d4f23df4284a488f4007b3944580c0b97 -size 17655 diff --git a/checkpoint-250/rng_state_1.pth b/checkpoint-250/rng_state_1.pth deleted file mode 100644 index 6b2ef88173fde17f2b3e738a28446f89a0528a96..0000000000000000000000000000000000000000 --- a/checkpoint-250/rng_state_1.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7e34eb456d2d003a2839f2daa9425e99bdd79ed7e24a1de9fc7d5738476bfb4b -size 17655 diff --git a/checkpoint-250/rng_state_2.pth b/checkpoint-250/rng_state_2.pth deleted file mode 100644 index 7b118d52a3006aea6c44f23f94c5568d1fb0a2f3..0000000000000000000000000000000000000000 --- a/checkpoint-250/rng_state_2.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b374af4a2765d8771cee7a72921d3c2e438b9bee34f0b2d098ce6071afeb65e4 -size 17655 diff --git a/checkpoint-250/rng_state_3.pth b/checkpoint-250/rng_state_3.pth deleted 
file mode 100644 index 3f6fd9aa58eb1d5815ca991134531a3280601900..0000000000000000000000000000000000000000 --- a/checkpoint-250/rng_state_3.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5df75d8477fcc69c7abb03025313915ebfe3ac18c54a7c57aaa455c0099e13e5 -size 17655 diff --git a/checkpoint-250/trainer_state.json b/checkpoint-250/trainer_state.json deleted file mode 100644 index cff5531f57c038af15c75c5f96c3d4bbe83b34d8..0000000000000000000000000000000000000000 --- a/checkpoint-250/trainer_state.json +++ /dev/null @@ -1,1543 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 0.03307753373908441, - "eval_steps": 756, - "global_step": 250, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.0, - "learning_rate": 0.0, - "loss": 0.9197, - "step": 1 - }, - { - "epoch": 0.0, - "eval_loss": 1.4652303457260132, - "eval_runtime": 2.1726, - "eval_samples_per_second": 79.627, - "eval_steps_per_second": 3.682, - "step": 1 - }, - { - "epoch": 0.0, - "eval_bench_accuracy_agieval": 0.2711864406779661, - "eval_bench_accuracy_arc_challenge": 0.8703703703703703, - "eval_bench_accuracy_arc_easy": 0.9259259259259259, - "eval_bench_accuracy_bigbench": 0.36065573770491804, - "eval_bench_accuracy_boolq": 0.5740740740740741, - "eval_bench_accuracy_mmlu": 0.5185185185185185, - "eval_bench_accuracy_openbookqa": 0.1111111111111111, - "eval_bench_accuracy_truthful_qa": 0.3584905660377358, - "eval_bench_accuracy_winogrande": 0.4444444444444444, - "eval_bench_average_accuracy": 0.4927530209850072, - "eval_bench_loss": 2.6978388407144203, - "eval_bench_total_accuracy": 0.48893360160965793, - "step": 1 - }, - { - "epoch": 0.0, - "learning_rate": 6.000000000000001e-07, - "loss": 1.3426, - "step": 2 - }, - { - "epoch": 0.0, - "learning_rate": 1.2000000000000002e-06, - "loss": 1.5882, - "step": 3 - }, - { - "epoch": 0.0, - "learning_rate": 1.8e-06, - "loss": 0.8542, - "step": 4 - }, - { - "epoch": 0.0, - "learning_rate": 2.4000000000000003e-06, - "loss": 0.9629, - "step": 5 - }, - { - "epoch": 0.0, - "learning_rate": 3e-06, - "loss": 0.903, - "step": 6 - }, - { - "epoch": 0.0, - "learning_rate": 3.6e-06, - "loss": 0.909, - "step": 7 - }, - { - "epoch": 0.0, - "learning_rate": 4.2e-06, - "loss": 0.8666, - "step": 8 - }, - { - "epoch": 0.0, - "learning_rate": 4.800000000000001e-06, - "loss": 1.0108, - "step": 9 - }, - { - "epoch": 0.0, - "learning_rate": 5.4e-06, - "loss": 0.8958, - "step": 10 - }, - { - "epoch": 0.0, - "learning_rate": 6e-06, - "loss": 0.9348, - "step": 11 - }, - { - "epoch": 0.0, - "learning_rate": 5.999602806831722e-06, - "loss": 0.7832, - "step": 12 - }, - { - "epoch": 0.0, - "learning_rate": 5.999205613663445e-06, - "loss": 0.8083, - "step": 13 - }, - { - "epoch": 0.0, - "learning_rate": 5.9988084204951675e-06, - "loss": 0.8164, - "step": 14 - }, - { - "epoch": 0.0, - "learning_rate": 5.99841122732689e-06, - "loss": 0.7834, - "step": 15 - }, - { - "epoch": 0.0, - "learning_rate": 5.998014034158613e-06, - "loss": 0.8718, - "step": 16 - }, - { - "epoch": 0.0, - "learning_rate": 5.997616840990336e-06, - "loss": 0.84, - "step": 17 - }, - { - "epoch": 0.0, - "learning_rate": 5.997219647822058e-06, - "loss": 0.7397, - "step": 18 - }, - { - "epoch": 0.0, - "learning_rate": 5.99682245465378e-06, - "loss": 0.7445, - "step": 19 - }, - { - "epoch": 0.0, - "learning_rate": 5.996425261485502e-06, - "loss": 0.7898, - "step": 20 - }, - { - "epoch": 0.0, - "learning_rate": 
5.996028068317225e-06, - "loss": 0.7388, - "step": 21 - }, - { - "epoch": 0.0, - "learning_rate": 5.9956308751489475e-06, - "loss": 0.7296, - "step": 22 - }, - { - "epoch": 0.0, - "learning_rate": 5.99523368198067e-06, - "loss": 0.7993, - "step": 23 - }, - { - "epoch": 0.0, - "learning_rate": 5.994836488812393e-06, - "loss": 0.7188, - "step": 24 - }, - { - "epoch": 0.0, - "learning_rate": 5.994439295644115e-06, - "loss": 0.7473, - "step": 25 - }, - { - "epoch": 0.0, - "learning_rate": 5.994042102475838e-06, - "loss": 0.6997, - "step": 26 - }, - { - "epoch": 0.0, - "learning_rate": 5.99364490930756e-06, - "loss": 0.725, - "step": 27 - }, - { - "epoch": 0.0, - "learning_rate": 5.993247716139283e-06, - "loss": 0.7272, - "step": 28 - }, - { - "epoch": 0.0, - "learning_rate": 5.992850522971005e-06, - "loss": 0.7427, - "step": 29 - }, - { - "epoch": 0.0, - "learning_rate": 5.992453329802727e-06, - "loss": 0.7309, - "step": 30 - }, - { - "epoch": 0.0, - "learning_rate": 5.99205613663445e-06, - "loss": 0.6764, - "step": 31 - }, - { - "epoch": 0.0, - "learning_rate": 5.991658943466173e-06, - "loss": 0.7556, - "step": 32 - }, - { - "epoch": 0.0, - "learning_rate": 5.991261750297895e-06, - "loss": 0.7301, - "step": 33 - }, - { - "epoch": 0.0, - "learning_rate": 5.990864557129617e-06, - "loss": 0.6776, - "step": 34 - }, - { - "epoch": 0.0, - "learning_rate": 5.99046736396134e-06, - "loss": 0.6884, - "step": 35 - }, - { - "epoch": 0.0, - "learning_rate": 5.990070170793063e-06, - "loss": 0.7179, - "step": 36 - }, - { - "epoch": 0.0, - "learning_rate": 5.989672977624785e-06, - "loss": 0.6915, - "step": 37 - }, - { - "epoch": 0.01, - "learning_rate": 5.989275784456507e-06, - "loss": 0.7308, - "step": 38 - }, - { - "epoch": 0.01, - "learning_rate": 5.98887859128823e-06, - "loss": 0.6743, - "step": 39 - }, - { - "epoch": 0.01, - "learning_rate": 5.9884813981199526e-06, - "loss": 0.6604, - "step": 40 - }, - { - "epoch": 0.01, - "learning_rate": 5.988084204951675e-06, - "loss": 0.6609, - "step": 41 - }, - { - "epoch": 0.01, - "learning_rate": 5.987687011783397e-06, - "loss": 0.6524, - "step": 42 - }, - { - "epoch": 0.01, - "learning_rate": 5.98728981861512e-06, - "loss": 0.6386, - "step": 43 - }, - { - "epoch": 0.01, - "learning_rate": 5.986892625446843e-06, - "loss": 0.728, - "step": 44 - }, - { - "epoch": 0.01, - "learning_rate": 5.986495432278565e-06, - "loss": 0.6971, - "step": 45 - }, - { - "epoch": 0.01, - "learning_rate": 5.986098239110287e-06, - "loss": 0.6772, - "step": 46 - }, - { - "epoch": 0.01, - "learning_rate": 5.98570104594201e-06, - "loss": 0.6774, - "step": 47 - }, - { - "epoch": 0.01, - "learning_rate": 5.9853038527737325e-06, - "loss": 0.6868, - "step": 48 - }, - { - "epoch": 0.01, - "learning_rate": 5.984906659605455e-06, - "loss": 0.7169, - "step": 49 - }, - { - "epoch": 0.01, - "learning_rate": 5.984509466437178e-06, - "loss": 0.669, - "step": 50 - }, - { - "epoch": 0.01, - "learning_rate": 5.9841122732689e-06, - "loss": 0.7112, - "step": 51 - }, - { - "epoch": 0.01, - "learning_rate": 5.983715080100622e-06, - "loss": 0.6667, - "step": 52 - }, - { - "epoch": 0.01, - "learning_rate": 5.983317886932344e-06, - "loss": 0.6528, - "step": 53 - }, - { - "epoch": 0.01, - "learning_rate": 5.982920693764068e-06, - "loss": 0.6699, - "step": 54 - }, - { - "epoch": 0.01, - "learning_rate": 5.98252350059579e-06, - "loss": 0.6584, - "step": 55 - }, - { - "epoch": 0.01, - "learning_rate": 5.9821263074275125e-06, - "loss": 0.6328, - "step": 56 - }, - { - "epoch": 0.01, - "learning_rate": 
5.981729114259235e-06, - "loss": 0.6472, - "step": 57 - }, - { - "epoch": 0.01, - "learning_rate": 5.981331921090958e-06, - "loss": 0.6992, - "step": 58 - }, - { - "epoch": 0.01, - "learning_rate": 5.98093472792268e-06, - "loss": 0.6666, - "step": 59 - }, - { - "epoch": 0.01, - "learning_rate": 5.980537534754402e-06, - "loss": 0.6819, - "step": 60 - }, - { - "epoch": 0.01, - "learning_rate": 5.980140341586125e-06, - "loss": 0.705, - "step": 61 - }, - { - "epoch": 0.01, - "learning_rate": 5.979743148417847e-06, - "loss": 0.6871, - "step": 62 - }, - { - "epoch": 0.01, - "learning_rate": 5.97934595524957e-06, - "loss": 0.6998, - "step": 63 - }, - { - "epoch": 0.01, - "learning_rate": 5.978948762081292e-06, - "loss": 0.6081, - "step": 64 - }, - { - "epoch": 0.01, - "learning_rate": 5.9785515689130154e-06, - "loss": 0.6985, - "step": 65 - }, - { - "epoch": 0.01, - "learning_rate": 5.978154375744738e-06, - "loss": 0.6631, - "step": 66 - }, - { - "epoch": 0.01, - "learning_rate": 5.97775718257646e-06, - "loss": 0.6534, - "step": 67 - }, - { - "epoch": 0.01, - "learning_rate": 5.977359989408182e-06, - "loss": 0.6685, - "step": 68 - }, - { - "epoch": 0.01, - "learning_rate": 5.976962796239905e-06, - "loss": 0.6821, - "step": 69 - }, - { - "epoch": 0.01, - "learning_rate": 5.976565603071627e-06, - "loss": 0.6241, - "step": 70 - }, - { - "epoch": 0.01, - "learning_rate": 5.976168409903349e-06, - "loss": 0.6357, - "step": 71 - }, - { - "epoch": 0.01, - "learning_rate": 5.975771216735072e-06, - "loss": 0.6466, - "step": 72 - }, - { - "epoch": 0.01, - "learning_rate": 5.975374023566795e-06, - "loss": 0.6579, - "step": 73 - }, - { - "epoch": 0.01, - "learning_rate": 5.9749768303985176e-06, - "loss": 0.6298, - "step": 74 - }, - { - "epoch": 0.01, - "learning_rate": 5.97457963723024e-06, - "loss": 0.703, - "step": 75 - }, - { - "epoch": 0.01, - "learning_rate": 5.974182444061963e-06, - "loss": 0.6152, - "step": 76 - }, - { - "epoch": 0.01, - "learning_rate": 5.973785250893685e-06, - "loss": 0.6682, - "step": 77 - }, - { - "epoch": 0.01, - "learning_rate": 5.973388057725407e-06, - "loss": 0.6427, - "step": 78 - }, - { - "epoch": 0.01, - "learning_rate": 5.972990864557129e-06, - "loss": 0.6969, - "step": 79 - }, - { - "epoch": 0.01, - "learning_rate": 5.972593671388852e-06, - "loss": 0.6619, - "step": 80 - }, - { - "epoch": 0.01, - "learning_rate": 5.9721964782205745e-06, - "loss": 0.6332, - "step": 81 - }, - { - "epoch": 0.01, - "learning_rate": 5.9717992850522975e-06, - "loss": 0.6203, - "step": 82 - }, - { - "epoch": 0.01, - "learning_rate": 5.97140209188402e-06, - "loss": 0.6463, - "step": 83 - }, - { - "epoch": 0.01, - "learning_rate": 5.971004898715743e-06, - "loss": 0.6718, - "step": 84 - }, - { - "epoch": 0.01, - "learning_rate": 5.970607705547465e-06, - "loss": 0.6495, - "step": 85 - }, - { - "epoch": 0.01, - "learning_rate": 5.970210512379187e-06, - "loss": 0.5787, - "step": 86 - }, - { - "epoch": 0.01, - "learning_rate": 5.96981331921091e-06, - "loss": 0.6897, - "step": 87 - }, - { - "epoch": 0.01, - "learning_rate": 5.969416126042632e-06, - "loss": 0.6688, - "step": 88 - }, - { - "epoch": 0.01, - "learning_rate": 5.9690189328743544e-06, - "loss": 0.6697, - "step": 89 - }, - { - "epoch": 0.01, - "learning_rate": 5.968621739706077e-06, - "loss": 0.6156, - "step": 90 - }, - { - "epoch": 0.01, - "learning_rate": 5.9682245465378e-06, - "loss": 0.6301, - "step": 91 - }, - { - "epoch": 0.01, - "learning_rate": 5.967827353369523e-06, - "loss": 0.6121, - "step": 92 - }, - { - "epoch": 0.01, - 
"learning_rate": 5.967430160201245e-06, - "loss": 0.6177, - "step": 93 - }, - { - "epoch": 0.01, - "learning_rate": 5.967032967032967e-06, - "loss": 0.611, - "step": 94 - }, - { - "epoch": 0.01, - "learning_rate": 5.96663577386469e-06, - "loss": 0.6359, - "step": 95 - }, - { - "epoch": 0.01, - "learning_rate": 5.966238580696412e-06, - "loss": 0.6417, - "step": 96 - }, - { - "epoch": 0.01, - "learning_rate": 5.965841387528134e-06, - "loss": 0.6312, - "step": 97 - }, - { - "epoch": 0.01, - "learning_rate": 5.965444194359857e-06, - "loss": 0.6184, - "step": 98 - }, - { - "epoch": 0.01, - "learning_rate": 5.9650470011915796e-06, - "loss": 0.6724, - "step": 99 - }, - { - "epoch": 0.01, - "learning_rate": 5.964649808023302e-06, - "loss": 0.6833, - "step": 100 - }, - { - "epoch": 0.01, - "learning_rate": 5.964252614855025e-06, - "loss": 0.6433, - "step": 101 - }, - { - "epoch": 0.01, - "learning_rate": 5.963855421686747e-06, - "loss": 0.6766, - "step": 102 - }, - { - "epoch": 0.01, - "learning_rate": 5.96345822851847e-06, - "loss": 0.6527, - "step": 103 - }, - { - "epoch": 0.01, - "learning_rate": 5.963061035350192e-06, - "loss": 0.5982, - "step": 104 - }, - { - "epoch": 0.01, - "learning_rate": 5.962663842181914e-06, - "loss": 0.6749, - "step": 105 - }, - { - "epoch": 0.01, - "learning_rate": 5.962266649013637e-06, - "loss": 0.6494, - "step": 106 - }, - { - "epoch": 0.01, - "learning_rate": 5.9618694558453595e-06, - "loss": 0.6998, - "step": 107 - }, - { - "epoch": 0.01, - "learning_rate": 5.961472262677082e-06, - "loss": 0.6112, - "step": 108 - }, - { - "epoch": 0.01, - "learning_rate": 5.961075069508805e-06, - "loss": 0.624, - "step": 109 - }, - { - "epoch": 0.01, - "learning_rate": 5.960677876340528e-06, - "loss": 0.6329, - "step": 110 - }, - { - "epoch": 0.01, - "learning_rate": 5.96028068317225e-06, - "loss": 0.6491, - "step": 111 - }, - { - "epoch": 0.01, - "learning_rate": 5.959883490003972e-06, - "loss": 0.6672, - "step": 112 - }, - { - "epoch": 0.01, - "learning_rate": 5.959486296835694e-06, - "loss": 0.6279, - "step": 113 - }, - { - "epoch": 0.02, - "learning_rate": 5.959089103667417e-06, - "loss": 0.6479, - "step": 114 - }, - { - "epoch": 0.02, - "learning_rate": 5.9586919104991395e-06, - "loss": 0.6214, - "step": 115 - }, - { - "epoch": 0.02, - "learning_rate": 5.958294717330862e-06, - "loss": 0.6618, - "step": 116 - }, - { - "epoch": 0.02, - "learning_rate": 5.957897524162585e-06, - "loss": 0.6703, - "step": 117 - }, - { - "epoch": 0.02, - "learning_rate": 5.957500330994307e-06, - "loss": 0.6417, - "step": 118 - }, - { - "epoch": 0.02, - "learning_rate": 5.957103137826029e-06, - "loss": 0.631, - "step": 119 - }, - { - "epoch": 0.02, - "learning_rate": 5.956705944657752e-06, - "loss": 0.6169, - "step": 120 - }, - { - "epoch": 0.02, - "learning_rate": 5.956308751489475e-06, - "loss": 0.6521, - "step": 121 - }, - { - "epoch": 0.02, - "learning_rate": 5.955911558321197e-06, - "loss": 0.6635, - "step": 122 - }, - { - "epoch": 0.02, - "learning_rate": 5.955514365152919e-06, - "loss": 0.6496, - "step": 123 - }, - { - "epoch": 0.02, - "learning_rate": 5.955117171984642e-06, - "loss": 0.6431, - "step": 124 - }, - { - "epoch": 0.02, - "learning_rate": 5.954719978816365e-06, - "loss": 0.6246, - "step": 125 - }, - { - "epoch": 0.02, - "learning_rate": 5.954322785648087e-06, - "loss": 0.6557, - "step": 126 - }, - { - "epoch": 0.02, - "learning_rate": 5.953925592479809e-06, - "loss": 0.6082, - "step": 127 - }, - { - "epoch": 0.02, - "learning_rate": 5.953528399311532e-06, - "loss": 0.5941, - 
"step": 128 - }, - { - "epoch": 0.02, - "learning_rate": 5.953131206143255e-06, - "loss": 0.6566, - "step": 129 - }, - { - "epoch": 0.02, - "learning_rate": 5.952734012974977e-06, - "loss": 0.6243, - "step": 130 - }, - { - "epoch": 0.02, - "learning_rate": 5.952336819806699e-06, - "loss": 0.594, - "step": 131 - }, - { - "epoch": 0.02, - "learning_rate": 5.951939626638422e-06, - "loss": 0.68, - "step": 132 - }, - { - "epoch": 0.02, - "learning_rate": 5.9515424334701446e-06, - "loss": 0.6302, - "step": 133 - }, - { - "epoch": 0.02, - "learning_rate": 5.951145240301867e-06, - "loss": 0.6251, - "step": 134 - }, - { - "epoch": 0.02, - "learning_rate": 5.950748047133589e-06, - "loss": 0.6326, - "step": 135 - }, - { - "epoch": 0.02, - "learning_rate": 5.950350853965312e-06, - "loss": 0.6314, - "step": 136 - }, - { - "epoch": 0.02, - "learning_rate": 5.949953660797034e-06, - "loss": 0.6598, - "step": 137 - }, - { - "epoch": 0.02, - "learning_rate": 5.949556467628757e-06, - "loss": 0.6583, - "step": 138 - }, - { - "epoch": 0.02, - "learning_rate": 5.949159274460479e-06, - "loss": 0.6162, - "step": 139 - }, - { - "epoch": 0.02, - "learning_rate": 5.948762081292202e-06, - "loss": 0.7042, - "step": 140 - }, - { - "epoch": 0.02, - "learning_rate": 5.9483648881239245e-06, - "loss": 0.6733, - "step": 141 - }, - { - "epoch": 0.02, - "learning_rate": 5.947967694955647e-06, - "loss": 0.6103, - "step": 142 - }, - { - "epoch": 0.02, - "learning_rate": 5.94757050178737e-06, - "loss": 0.6269, - "step": 143 - }, - { - "epoch": 0.02, - "learning_rate": 5.947173308619092e-06, - "loss": 0.663, - "step": 144 - }, - { - "epoch": 0.02, - "learning_rate": 5.946776115450814e-06, - "loss": 0.5794, - "step": 145 - }, - { - "epoch": 0.02, - "learning_rate": 5.946378922282537e-06, - "loss": 0.6868, - "step": 146 - }, - { - "epoch": 0.02, - "learning_rate": 5.945981729114259e-06, - "loss": 0.6064, - "step": 147 - }, - { - "epoch": 0.02, - "learning_rate": 5.945584535945982e-06, - "loss": 0.6519, - "step": 148 - }, - { - "epoch": 0.02, - "learning_rate": 5.9451873427777045e-06, - "loss": 0.655, - "step": 149 - }, - { - "epoch": 0.02, - "learning_rate": 5.944790149609427e-06, - "loss": 0.6617, - "step": 150 - }, - { - "epoch": 0.02, - "learning_rate": 5.94439295644115e-06, - "loss": 0.627, - "step": 151 - }, - { - "epoch": 0.02, - "learning_rate": 5.943995763272872e-06, - "loss": 0.5837, - "step": 152 - }, - { - "epoch": 0.02, - "learning_rate": 5.943598570104594e-06, - "loss": 0.6201, - "step": 153 - }, - { - "epoch": 0.02, - "learning_rate": 5.943201376936317e-06, - "loss": 0.6291, - "step": 154 - }, - { - "epoch": 0.02, - "learning_rate": 5.942804183768039e-06, - "loss": 0.6061, - "step": 155 - }, - { - "epoch": 0.02, - "learning_rate": 5.942406990599761e-06, - "loss": 0.624, - "step": 156 - }, - { - "epoch": 0.02, - "learning_rate": 5.942009797431484e-06, - "loss": 0.6418, - "step": 157 - }, - { - "epoch": 0.02, - "learning_rate": 5.941612604263207e-06, - "loss": 0.5858, - "step": 158 - }, - { - "epoch": 0.02, - "learning_rate": 5.94121541109493e-06, - "loss": 0.6407, - "step": 159 - }, - { - "epoch": 0.02, - "learning_rate": 5.940818217926652e-06, - "loss": 0.6222, - "step": 160 - }, - { - "epoch": 0.02, - "learning_rate": 5.940421024758374e-06, - "loss": 0.5938, - "step": 161 - }, - { - "epoch": 0.02, - "learning_rate": 5.940023831590097e-06, - "loss": 0.6157, - "step": 162 - }, - { - "epoch": 0.02, - "learning_rate": 5.939626638421819e-06, - "loss": 0.5989, - "step": 163 - }, - { - "epoch": 0.02, - "learning_rate": 
5.939229445253541e-06, - "loss": 0.7056, - "step": 164 - }, - { - "epoch": 0.02, - "learning_rate": 5.938832252085264e-06, - "loss": 0.6606, - "step": 165 - }, - { - "epoch": 0.02, - "learning_rate": 5.9384350589169865e-06, - "loss": 0.6303, - "step": 166 - }, - { - "epoch": 0.02, - "learning_rate": 5.9380378657487095e-06, - "loss": 0.6332, - "step": 167 - }, - { - "epoch": 0.02, - "learning_rate": 5.937640672580432e-06, - "loss": 0.6197, - "step": 168 - }, - { - "epoch": 0.02, - "learning_rate": 5.937243479412155e-06, - "loss": 0.6318, - "step": 169 - }, - { - "epoch": 0.02, - "learning_rate": 5.936846286243877e-06, - "loss": 0.6598, - "step": 170 - }, - { - "epoch": 0.02, - "learning_rate": 5.936449093075599e-06, - "loss": 0.662, - "step": 171 - }, - { - "epoch": 0.02, - "learning_rate": 5.936051899907321e-06, - "loss": 0.6018, - "step": 172 - }, - { - "epoch": 0.02, - "learning_rate": 5.935654706739044e-06, - "loss": 0.6955, - "step": 173 - }, - { - "epoch": 0.02, - "learning_rate": 5.9352575135707665e-06, - "loss": 0.6283, - "step": 174 - }, - { - "epoch": 0.02, - "learning_rate": 5.934860320402489e-06, - "loss": 0.6829, - "step": 175 - }, - { - "epoch": 0.02, - "learning_rate": 5.934463127234212e-06, - "loss": 0.5985, - "step": 176 - }, - { - "epoch": 0.02, - "learning_rate": 5.934065934065935e-06, - "loss": 0.6385, - "step": 177 - }, - { - "epoch": 0.02, - "learning_rate": 5.933668740897657e-06, - "loss": 0.6326, - "step": 178 - }, - { - "epoch": 0.02, - "learning_rate": 5.933271547729379e-06, - "loss": 0.639, - "step": 179 - }, - { - "epoch": 0.02, - "learning_rate": 5.932874354561102e-06, - "loss": 0.6084, - "step": 180 - }, - { - "epoch": 0.02, - "learning_rate": 5.932477161392824e-06, - "loss": 0.6549, - "step": 181 - }, - { - "epoch": 0.02, - "learning_rate": 5.932079968224546e-06, - "loss": 0.6728, - "step": 182 - }, - { - "epoch": 0.02, - "learning_rate": 5.931682775056269e-06, - "loss": 0.6351, - "step": 183 - }, - { - "epoch": 0.02, - "learning_rate": 5.931285581887992e-06, - "loss": 0.6375, - "step": 184 - }, - { - "epoch": 0.02, - "learning_rate": 5.930888388719714e-06, - "loss": 0.6814, - "step": 185 - }, - { - "epoch": 0.02, - "learning_rate": 5.930491195551437e-06, - "loss": 0.5968, - "step": 186 - }, - { - "epoch": 0.02, - "learning_rate": 5.930094002383159e-06, - "loss": 0.6053, - "step": 187 - }, - { - "epoch": 0.02, - "learning_rate": 5.929696809214882e-06, - "loss": 0.6468, - "step": 188 - }, - { - "epoch": 0.03, - "learning_rate": 5.929299616046604e-06, - "loss": 0.6407, - "step": 189 - }, - { - "epoch": 0.03, - "learning_rate": 5.928902422878326e-06, - "loss": 0.6996, - "step": 190 - }, - { - "epoch": 0.03, - "learning_rate": 5.928505229710049e-06, - "loss": 0.6158, - "step": 191 - }, - { - "epoch": 0.03, - "learning_rate": 5.9281080365417716e-06, - "loss": 0.6128, - "step": 192 - }, - { - "epoch": 0.03, - "learning_rate": 5.927710843373494e-06, - "loss": 0.6558, - "step": 193 - }, - { - "epoch": 0.03, - "learning_rate": 5.927313650205216e-06, - "loss": 0.6726, - "step": 194 - }, - { - "epoch": 0.03, - "learning_rate": 5.92691645703694e-06, - "loss": 0.6292, - "step": 195 - }, - { - "epoch": 0.03, - "learning_rate": 5.926519263868662e-06, - "loss": 0.6004, - "step": 196 - }, - { - "epoch": 0.03, - "learning_rate": 5.926122070700384e-06, - "loss": 0.599, - "step": 197 - }, - { - "epoch": 0.03, - "learning_rate": 5.925724877532106e-06, - "loss": 0.6374, - "step": 198 - }, - { - "epoch": 0.03, - "learning_rate": 5.925327684363829e-06, - "loss": 0.6472, - "step": 199 
- }, - { - "epoch": 0.03, - "learning_rate": 5.9249304911955515e-06, - "loss": 0.594, - "step": 200 - }, - { - "epoch": 0.03, - "learning_rate": 5.924533298027274e-06, - "loss": 0.6382, - "step": 201 - }, - { - "epoch": 0.03, - "learning_rate": 5.924136104858997e-06, - "loss": 0.5817, - "step": 202 - }, - { - "epoch": 0.03, - "learning_rate": 5.923738911690719e-06, - "loss": 0.6128, - "step": 203 - }, - { - "epoch": 0.03, - "learning_rate": 5.923341718522442e-06, - "loss": 0.651, - "step": 204 - }, - { - "epoch": 0.03, - "learning_rate": 5.922944525354164e-06, - "loss": 0.5681, - "step": 205 - }, - { - "epoch": 0.03, - "learning_rate": 5.922547332185887e-06, - "loss": 0.6183, - "step": 206 - }, - { - "epoch": 0.03, - "learning_rate": 5.922150139017609e-06, - "loss": 0.5867, - "step": 207 - }, - { - "epoch": 0.03, - "learning_rate": 5.9217529458493315e-06, - "loss": 0.6048, - "step": 208 - }, - { - "epoch": 0.03, - "learning_rate": 5.921355752681054e-06, - "loss": 0.6968, - "step": 209 - }, - { - "epoch": 0.03, - "learning_rate": 5.920958559512777e-06, - "loss": 0.6259, - "step": 210 - }, - { - "epoch": 0.03, - "learning_rate": 5.920561366344499e-06, - "loss": 0.6076, - "step": 211 - }, - { - "epoch": 0.03, - "learning_rate": 5.920164173176221e-06, - "loss": 0.64, - "step": 212 - }, - { - "epoch": 0.03, - "learning_rate": 5.919766980007944e-06, - "loss": 0.6249, - "step": 213 - }, - { - "epoch": 0.03, - "learning_rate": 5.919369786839667e-06, - "loss": 0.6331, - "step": 214 - }, - { - "epoch": 0.03, - "learning_rate": 5.918972593671389e-06, - "loss": 0.6466, - "step": 215 - }, - { - "epoch": 0.03, - "learning_rate": 5.918575400503111e-06, - "loss": 0.5982, - "step": 216 - }, - { - "epoch": 0.03, - "learning_rate": 5.9181782073348344e-06, - "loss": 0.5719, - "step": 217 - }, - { - "epoch": 0.03, - "learning_rate": 5.917781014166557e-06, - "loss": 0.6032, - "step": 218 - }, - { - "epoch": 0.03, - "learning_rate": 5.917383820998279e-06, - "loss": 0.5741, - "step": 219 - }, - { - "epoch": 0.03, - "learning_rate": 5.916986627830001e-06, - "loss": 0.58, - "step": 220 - }, - { - "epoch": 0.03, - "learning_rate": 5.916589434661724e-06, - "loss": 0.6232, - "step": 221 - }, - { - "epoch": 0.03, - "learning_rate": 5.916192241493446e-06, - "loss": 0.6287, - "step": 222 - }, - { - "epoch": 0.03, - "learning_rate": 5.915795048325169e-06, - "loss": 0.6344, - "step": 223 - }, - { - "epoch": 0.03, - "learning_rate": 5.915397855156891e-06, - "loss": 0.6536, - "step": 224 - }, - { - "epoch": 0.03, - "learning_rate": 5.915000661988614e-06, - "loss": 0.6297, - "step": 225 - }, - { - "epoch": 0.03, - "learning_rate": 5.9146034688203365e-06, - "loss": 0.5635, - "step": 226 - }, - { - "epoch": 0.03, - "learning_rate": 5.914206275652059e-06, - "loss": 0.5931, - "step": 227 - }, - { - "epoch": 0.03, - "learning_rate": 5.913809082483782e-06, - "loss": 0.5681, - "step": 228 - }, - { - "epoch": 0.03, - "learning_rate": 5.913411889315504e-06, - "loss": 0.6155, - "step": 229 - }, - { - "epoch": 0.03, - "learning_rate": 5.913014696147226e-06, - "loss": 0.605, - "step": 230 - }, - { - "epoch": 0.03, - "learning_rate": 5.912617502978948e-06, - "loss": 0.6364, - "step": 231 - }, - { - "epoch": 0.03, - "learning_rate": 5.912220309810671e-06, - "loss": 0.6333, - "step": 232 - }, - { - "epoch": 0.03, - "learning_rate": 5.911823116642394e-06, - "loss": 0.6666, - "step": 233 - }, - { - "epoch": 0.03, - "learning_rate": 5.9114259234741165e-06, - "loss": 0.6296, - "step": 234 - }, - { - "epoch": 0.03, - "learning_rate": 
5.911028730305839e-06, - "loss": 0.6422, - "step": 235 - }, - { - "epoch": 0.03, - "learning_rate": 5.910631537137562e-06, - "loss": 0.6426, - "step": 236 - }, - { - "epoch": 0.03, - "learning_rate": 5.910234343969284e-06, - "loss": 0.6389, - "step": 237 - }, - { - "epoch": 0.03, - "learning_rate": 5.909837150801006e-06, - "loss": 0.5695, - "step": 238 - }, - { - "epoch": 0.03, - "learning_rate": 5.909439957632729e-06, - "loss": 0.6271, - "step": 239 - }, - { - "epoch": 0.03, - "learning_rate": 5.909042764464451e-06, - "loss": 0.5981, - "step": 240 - }, - { - "epoch": 0.03, - "learning_rate": 5.908645571296173e-06, - "loss": 0.6345, - "step": 241 - }, - { - "epoch": 0.03, - "learning_rate": 5.9082483781278964e-06, - "loss": 0.6404, - "step": 242 - }, - { - "epoch": 0.03, - "learning_rate": 5.9078511849596195e-06, - "loss": 0.6046, - "step": 243 - }, - { - "epoch": 0.03, - "learning_rate": 5.907453991791342e-06, - "loss": 0.5791, - "step": 244 - }, - { - "epoch": 0.03, - "learning_rate": 5.907056798623064e-06, - "loss": 0.623, - "step": 245 - }, - { - "epoch": 0.03, - "learning_rate": 5.906659605454786e-06, - "loss": 0.6274, - "step": 246 - }, - { - "epoch": 0.03, - "learning_rate": 5.906262412286509e-06, - "loss": 0.629, - "step": 247 - }, - { - "epoch": 0.03, - "learning_rate": 5.905865219118231e-06, - "loss": 0.6967, - "step": 248 - }, - { - "epoch": 0.03, - "learning_rate": 5.905468025949953e-06, - "loss": 0.6649, - "step": 249 - }, - { - "epoch": 0.03, - "learning_rate": 5.905070832781676e-06, - "loss": 0.6514, - "step": 250 - } - ], - "logging_steps": 1, - "max_steps": 15116, - "num_train_epochs": 2, - "save_steps": 50, - "total_flos": 1.048510703075328e+18, - "trial_name": null, - "trial_params": null -} diff --git a/checkpoint-250/training_args.bin b/checkpoint-250/training_args.bin deleted file mode 100644 index 731dabe55350d521ab0dfde0b2f023771c347250..0000000000000000000000000000000000000000 --- a/checkpoint-250/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2f05be88d930176935da1678b48a8294634889bf7ae4f8bebdbaca140c2dac08 -size 5947 diff --git a/checkpoint-250/zero_to_fp32.py b/checkpoint-250/zero_to_fp32.py deleted file mode 100755 index c98caae31534368be22b67fc4ae906836c992a8d..0000000000000000000000000000000000000000 --- a/checkpoint-250/zero_to_fp32.py +++ /dev/null @@ -1,587 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets -# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in -# the future. Once extracted, the weights don't require DeepSpeed and can be used in any -# application. -# -# example: python zero_to_fp32.py . pytorch_model.bin - -import argparse -import torch -import glob -import math -import os -import re -from collections import OrderedDict -from dataclasses import dataclass - -# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with -# DeepSpeed data structures it has to be available in the current python environment. 
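# A minimal sketch (not part of the original file): the same conversion the
# CLI example above performs, driven from Python instead. It relies only on
# convert_zero_checkpoint_to_fp32_state_dict(), which is defined near the
# bottom of this script; "checkpoint-250" is one of the checkpoint folders
# this diff deletes, the output filename is illustrative, and the folder's
# 'latest' tag file is assumed to be present, as the loader below expects.
#
#   from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
#   convert_zero_checkpoint_to_fp32_state_dict(
#       "checkpoint-250",                          # folder holding 'latest' and the global_step* shards
#       "checkpoint-250/pytorch_model_fp32.bin",   # consolidated fp32 output
#   )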
-from deepspeed.utils import logger -from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, - FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, - FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) - - -@dataclass -class zero_model_state: - buffers: dict() - param_shapes: dict() - shared_params: list - ds_version: int - frozen_param_shapes: dict() - frozen_param_fragments: dict() - - -debug = 0 - -# load to cpu -device = torch.device('cpu') - - -def atoi(text): - return int(text) if text.isdigit() else text - - -def natural_keys(text): - ''' - alist.sort(key=natural_keys) sorts in human order - http://nedbatchelder.com/blog/200712/human_sorting.html - (See Toothy's implementation in the comments) - ''' - return [atoi(c) for c in re.split(r'(\d+)', text)] - - -def get_model_state_file(checkpoint_dir, zero_stage): - if not os.path.isdir(checkpoint_dir): - raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") - - # there should be only one file - if zero_stage <= 2: - file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") - elif zero_stage == 3: - file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") - - if not os.path.exists(file): - raise FileNotFoundError(f"can't find model states file at '{file}'") - - return file - - -def get_checkpoint_files(checkpoint_dir, glob_pattern): - # XXX: need to test that this simple glob rule works for multi-node setup too - ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) - - if len(ckpt_files) == 0: - raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") - - return ckpt_files - - -def get_optim_files(checkpoint_dir): - return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") - - -def get_model_state_files(checkpoint_dir): - return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") - - -def parse_model_states(files): - zero_model_states = [] - for file in files: - state_dict = torch.load(file, map_location=device) - - if BUFFER_NAMES not in state_dict: - raise ValueError(f"{file} is not a model state checkpoint") - buffer_names = state_dict[BUFFER_NAMES] - if debug: - print("Found buffers:", buffer_names) - - # recover just the buffers while restoring them to fp32 if they were saved in fp16 - buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} - param_shapes = state_dict[PARAM_SHAPES] - - # collect parameters that are included in param_shapes - param_names = [] - for s in param_shapes: - for name in s.keys(): - param_names.append(name) - - # update with frozen parameters - frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) - if frozen_param_shapes is not None: - if debug: - print(f"Found frozen_param_shapes: {frozen_param_shapes}") - param_names += list(frozen_param_shapes.keys()) - - # handle shared params - shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] - - ds_version = state_dict.get(DS_VERSION, None) - - frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) - - z_model_state = zero_model_state(buffers=buffers, - param_shapes=param_shapes, - shared_params=shared_params, - ds_version=ds_version, - frozen_param_shapes=frozen_param_shapes, - frozen_param_fragments=frozen_param_fragments) - zero_model_states.append(z_model_state) - - return zero_model_states - - -def parse_optim_states(files, ds_checkpoint_dir): - - total_files = len(files) - 
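# Each rank writes its own '*_optim_states.pt' shard. The loop below loads
# every shard onto the CPU and immediately pops the nested optimizer state
# (the potentially huge momentum/variance tensors), keeping only the
# metadata and the fp32 master weight partitions that the merge steps need.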
state_dicts = [] - for f in files: - state_dict = torch.load(f, map_location=device) - # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights - # and also handle the case where it was already removed by another helper script - state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) - state_dicts.append(state_dict) - - if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: - raise ValueError(f"{files[0]} is not a zero checkpoint") - zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] - world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] - - # For ZeRO-2 each param group can have different partition_count as data parallelism for expert - # parameters can be different from data parallelism for non-expert parameters. So we can just - # use the max of the partition_count to get the dp world_size. - - if type(world_size) is list: - world_size = max(world_size) - - if world_size != total_files: - raise ValueError( - f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " - "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." - ) - - # the groups are named differently in each stage - if zero_stage <= 2: - fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS - elif zero_stage == 3: - fp32_groups_key = FP32_FLAT_GROUPS - else: - raise ValueError(f"unknown zero stage {zero_stage}") - - if zero_stage <= 2: - fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] - elif zero_stage == 3: - # if there is more than one param group, there will be multiple flattened tensors - one - # flattened tensor per group - for simplicity merge them into a single tensor - # - # XXX: could make the script more memory efficient for when there are multiple groups - it - # will require matching the sub-lists of param_shapes for each param group flattened tensor - - fp32_flat_groups = [ - torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts)) - ] - - return zero_stage, world_size, fp32_flat_groups - - -def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir): - """ - Returns fp32 state_dict reconstructed from ds checkpoint - - Args: - - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) - - """ - print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") - - optim_files = get_optim_files(ds_checkpoint_dir) - zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) - print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") - - model_files = get_model_state_files(ds_checkpoint_dir) - - zero_model_states = parse_model_states(model_files) - print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') - - if zero_stage <= 2: - return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states) - elif zero_stage == 3: - return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states) - - -def _zero2_merge_frozen_params(state_dict, zero_model_states): - if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: - return - - frozen_param_shapes = zero_model_states[0].frozen_param_shapes - frozen_param_fragments = zero_model_states[0].frozen_param_fragments - - if debug: - num_elem = 
sum(s.numel() for s in frozen_param_shapes.values()) - print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') - - wanted_params = len(frozen_param_shapes) - wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) - avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) - print(f'Frozen params: Have {avail_numel} numels to process.') - print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') - - total_params = 0 - total_numel = 0 - for name, shape in frozen_param_shapes.items(): - total_params += 1 - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - - state_dict[name] = frozen_param_fragments[name] - - if debug: - print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") - - print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") - - -def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): - param_shapes = zero_model_states[0].param_shapes - - # Reconstruction protocol: - # - # XXX: document this - - if debug: - for i in range(world_size): - for j in range(len(fp32_flat_groups[0])): - print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") - - # XXX: memory usage doubles here (zero2) - num_param_groups = len(fp32_flat_groups[0]) - merged_single_partition_of_fp32_groups = [] - for i in range(num_param_groups): - merged_partitions = [sd[i] for sd in fp32_flat_groups] - full_single_fp32_vector = torch.cat(merged_partitions, 0) - merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) - avail_numel = sum( - [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) - - if debug: - wanted_params = sum([len(shapes) for shapes in param_shapes]) - wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) - # not asserting if there is a mismatch due to possible padding - print(f"Have {avail_numel} numels to process.") - print(f"Need {wanted_numel} numels in {wanted_params} params.") - - # params - # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support - # out-of-core computing solution - total_numel = 0 - total_params = 0 - for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): - offset = 0 - avail_numel = full_single_fp32_vector.numel() - for name, shape in shapes.items(): - - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - total_params += 1 - - if debug: - print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") - state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) - offset += unpartitioned_numel - - # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and - # avail_numel can differ by anywhere between 0..2*world_size. 
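# (Concrete illustration, not in the original: with world_size=4 the flat
#  groups are padded to multiples of align_to = 2*4 = 8, so the
#  zero2_align() helper defined below maps 1000003 to 1000008.)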
Due to two unrelated complex - # paddings performed in the code it's almost impossible to predict the exact numbers w/o the - # live optimizer object, so we are checking that the numbers are within the right range - align_to = 2 * world_size - - def zero2_align(x): - return align_to * math.ceil(x / align_to) - - if debug: - print(f"original offset={offset}, avail_numel={avail_numel}") - - offset = zero2_align(offset) - avail_numel = zero2_align(avail_numel) - - if debug: - print(f"aligned offset={offset}, avail_numel={avail_numel}") - - # Sanity check - if offset != avail_numel: - raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") - - print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") - - -def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states): - state_dict = OrderedDict() - - # buffers - buffers = zero_model_states[0].buffers - state_dict.update(buffers) - if debug: - print(f"added {len(buffers)} buffers") - - _zero2_merge_frozen_params(state_dict, zero_model_states) - - _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) - - # recover shared parameters - for pair in zero_model_states[0].shared_params: - if pair[1] in state_dict: - state_dict[pair[0]] = state_dict[pair[1]] - - return state_dict - - -def zero3_partitioned_param_info(unpartitioned_numel, world_size): - remainder = unpartitioned_numel % world_size - padding_numel = (world_size - remainder) if remainder else 0 - partitioned_numel = math.ceil(unpartitioned_numel / world_size) - return partitioned_numel, padding_numel - - -def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): - if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: - return - - if debug: - for i in range(world_size): - num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) - print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') - - frozen_param_shapes = zero_model_states[0].frozen_param_shapes - wanted_params = len(frozen_param_shapes) - wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) - avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size - print(f'Frozen params: Have {avail_numel} numels to process.') - print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') - - total_params = 0 - total_numel = 0 - for name, shape in zero_model_states[0].frozen_param_shapes.items(): - total_params += 1 - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - - param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) - state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) - - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print( - f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") - - -def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): - param_shapes = zero_model_states[0].param_shapes - avail_numel = fp32_flat_groups[0].numel() * world_size - # Reconstruction protocol: For zero3 we need to zip the 
partitions together at boundary of each - # param, re-consolidating each param, while dealing with padding if any - - # merge list of dicts, preserving order - param_shapes = {k: v for d in param_shapes for k, v in d.items()} - - if debug: - for i in range(world_size): - print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") - - wanted_params = len(param_shapes) - wanted_numel = sum(shape.numel() for shape in param_shapes.values()) - # not asserting if there is a mismatch due to possible padding - avail_numel = fp32_flat_groups[0].numel() * world_size - print(f"Trainable params: Have {avail_numel} numels to process.") - print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") - - # params - # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support - # out-of-core computing solution - offset = 0 - total_numel = 0 - total_params = 0 - for name, shape in param_shapes.items(): - - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - total_params += 1 - - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print( - f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - # XXX: memory usage doubles here - state_dict[name] = torch.cat( - tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)), - 0).narrow(0, 0, unpartitioned_numel).view(shape) - offset += partitioned_numel - - offset *= world_size - - # Sanity check - if offset != avail_numel: - raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") - - print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") - - -def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states): - state_dict = OrderedDict() - - # buffers - buffers = zero_model_states[0].buffers - state_dict.update(buffers) - if debug: - print(f"added {len(buffers)} buffers") - - _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) - - _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) - - # recover shared parameters - for pair in zero_model_states[0].shared_params: - if pair[1] in state_dict: - state_dict[pair[0]] = state_dict[pair[1]] - - return state_dict - - -def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None): - """ - Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with - ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example - via a model hub. - - Args: - - ``checkpoint_dir``: path to the desired checkpoint folder - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` - - Returns: - - pytorch ``state_dict`` - - Note: this approach may not work if your application doesn't have sufficient free CPU memory and - you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with - the checkpoint. 
- - A typical usage might be :: - - from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint - # do the training and checkpoint saving - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu - model = model.cpu() # move to cpu - model.load_state_dict(state_dict) - # submit to model hub or save the model to share with others - - In this example the ``model`` will no longer be usable in the deepspeed context of the same - application. i.e. you will need to re-initialize the deepspeed engine, since - ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. - - If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. - - """ - if tag is None: - latest_path = os.path.join(checkpoint_dir, 'latest') - if os.path.isfile(latest_path): - with open(latest_path, 'r') as fd: - tag = fd.read().strip() - else: - raise ValueError(f"Unable to find 'latest' file at {latest_path}") - - ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) - - if not os.path.isdir(ds_checkpoint_dir): - raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") - - return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir) - - -def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None): - """ - Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be - loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. - - Args: - - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` - """ - - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) - print(f"Saving fp32 state dict to {output_file}") - torch.save(state_dict, output_file) - - -def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): - """ - 1. Put the provided model to cpu - 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` - 3. Load it into the provided model - - Args: - - ``model``: the model object to update - - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` - - Returns: - - ``model`: modified model - - Make sure you have plenty of CPU memory available before you call this function. If you don't - have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it - conveniently placed for you in the checkpoint folder. - - A typical usage might be :: - - from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint - model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) - # submit to model hub or save the model to share with others - - Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context - of the same application. i.e. you will need to re-initialize the deepspeed engine, since - ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. 
- - """ - logger.info(f"Extracting fp32 weights") - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) - - logger.info(f"Overwriting model with fp32 weights") - model = model.cpu() - model.load_state_dict(state_dict, strict=False) - - return model - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser() - parser.add_argument("checkpoint_dir", - type=str, - help="path to the desired checkpoint folder, e.g., path/checkpoint-12") - parser.add_argument( - "output_file", - type=str, - help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)") - parser.add_argument("-t", - "--tag", - type=str, - default=None, - help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") - parser.add_argument("-d", "--debug", action='store_true', help="enable debug") - args = parser.parse_args() - - debug = args.debug - - convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag) diff --git a/ckpt50/config.json b/ckpt50/config.json deleted file mode 100644 index 82bacd02f94ab4a4cfdb7b12a0484fac8f301916..0000000000000000000000000000000000000000 --- a/ckpt50/config.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "_name_or_path": "mistralai/Mistral-7B-v0.1", - "architectures": [ - "MistralForCausalLM" - ], - "bos_token_id": 1, - "eos_token_id": 2, - "hidden_act": "silu", - "hidden_size": 4096, - "initializer_range": 0.02, - "intermediate_size": 14336, - "max_position_embeddings": 32768, - "model_type": "mistral", - "num_attention_heads": 32, - "num_hidden_layers": 32, - "num_key_value_heads": 8, - "rms_norm_eps": 1e-05, - "rope_theta": 10000.0, - "sliding_window": 4096, - "tie_word_embeddings": false, - "torch_dtype": "bfloat16", - "transformers_version": "4.34.0.dev0", - "use_cache": false, - "vocab_size": 32002 -} diff --git a/ckpt50/generation_config.json b/ckpt50/generation_config.json deleted file mode 100644 index 2c5f418036a121b3fd432d1bf2b3c5c9daf59fab..0000000000000000000000000000000000000000 --- a/ckpt50/generation_config.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "_from_model_config": true, - "bos_token_id": 1, - "eos_token_id": 2, - "transformers_version": "4.34.0.dev0" -} diff --git a/ckpt50/latest b/ckpt50/latest deleted file mode 100644 index 9b4dc801e3fb152ef5c0ee60d309c705a9b01564..0000000000000000000000000000000000000000 --- a/ckpt50/latest +++ /dev/null @@ -1 +0,0 @@ -global_step50 \ No newline at end of file diff --git a/ckpt50/pytorch_model-00001-of-00002.bin b/ckpt50/pytorch_model-00001-of-00002.bin deleted file mode 100644 index 5763f40101dbcef8a5301e8e4f8143c5ee3a0214..0000000000000000000000000000000000000000 --- a/ckpt50/pytorch_model-00001-of-00002.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:74a299c2c28cb9b40f392afd5819e34cdf713bb77c71aca3038602dab286095c -size 9943044428 diff --git a/ckpt50/pytorch_model-00002-of-00002.bin b/ckpt50/pytorch_model-00002-of-00002.bin deleted file mode 100644 index 69d5a838ffdef727c6be26e344738723ad7844c0..0000000000000000000000000000000000000000 --- a/ckpt50/pytorch_model-00002-of-00002.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:078f72ce86f271f6edfb8ca9e12be814398f4b4223c1ccfdc7415da377ff3e1f -size 4540552031 diff --git a/ckpt50/pytorch_model.bin.index.json b/ckpt50/pytorch_model.bin.index.json deleted file mode 100644 index 53213fb82ddc02718be2ce686f00ba7fb0af95e7..0000000000000000000000000000000000000000 --- 
a/ckpt50/pytorch_model.bin.index.json +++ /dev/null @@ -1,298 +0,0 @@ -{ - "metadata": { - "total_size": 14483496960 - }, - "weight_map": { - "lm_head.weight": "pytorch_model-00002-of-00002.bin", - "model.embed_tokens.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - 
"model.layers.12.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - 
"model.layers.17.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - 
"model.layers.21.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.22.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.23.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - 
"model.layers.26.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.30.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - 
"model.layers.30.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.input_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin", - "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - 
"model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.input_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin", - "model.norm.weight": "pytorch_model-00002-of-00002.bin" - } -} diff --git a/ckpt50/rng_state_0.pth b/ckpt50/rng_state_0.pth deleted file mode 100644 index 24aba7bcc2a9fb783bd13a6a67b96e5ad055d89d..0000000000000000000000000000000000000000 --- a/ckpt50/rng_state_0.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1eafe3d5e0585dde8c5033613de99a5d4f23df4284a488f4007b3944580c0b97 -size 17655 diff --git a/ckpt50/rng_state_1.pth b/ckpt50/rng_state_1.pth deleted file mode 100644 index 6b2ef88173fde17f2b3e738a28446f89a0528a96..0000000000000000000000000000000000000000 --- a/ckpt50/rng_state_1.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7e34eb456d2d003a2839f2daa9425e99bdd79ed7e24a1de9fc7d5738476bfb4b -size 17655 diff --git a/ckpt50/rng_state_2.pth b/ckpt50/rng_state_2.pth deleted file mode 100644 index 7b118d52a3006aea6c44f23f94c5568d1fb0a2f3..0000000000000000000000000000000000000000 --- a/ckpt50/rng_state_2.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b374af4a2765d8771cee7a72921d3c2e438b9bee34f0b2d098ce6071afeb65e4 -size 17655 diff --git a/ckpt50/rng_state_3.pth b/ckpt50/rng_state_3.pth deleted file mode 100644 index 3f6fd9aa58eb1d5815ca991134531a3280601900..0000000000000000000000000000000000000000 --- a/ckpt50/rng_state_3.pth +++ /dev/null @@ -1,3 +0,0 @@ -version 
https://git-lfs.github.com/spec/v1 -oid sha256:5df75d8477fcc69c7abb03025313915ebfe3ac18c54a7c57aaa455c0099e13e5 -size 17655 diff --git a/ckpt50/trainer_state.json b/ckpt50/trainer_state.json deleted file mode 100644 index 052244bb1c15d29f937923a54560092bc86574ed..0000000000000000000000000000000000000000 --- a/ckpt50/trainer_state.json +++ /dev/null @@ -1,343 +0,0 @@ -{ - "best_metric": null, - "best_model_checkpoint": null, - "epoch": 0.006615506747816882, - "eval_steps": 756, - "global_step": 50, - "is_hyper_param_search": false, - "is_local_process_zero": true, - "is_world_process_zero": true, - "log_history": [ - { - "epoch": 0.0, - "learning_rate": 0.0, - "loss": 0.929, - "step": 1 - }, - { - "epoch": 0.0, - "eval_loss": 0.9224275946617126, - "eval_runtime": 2.17, - "eval_samples_per_second": 79.724, - "eval_steps_per_second": 3.687, - "step": 1 - }, - { - "epoch": 0.0, - "eval_bench_accuracy_agieval": 0.288135593220339, - "eval_bench_accuracy_arc_challenge": 0.8148148148148148, - "eval_bench_accuracy_arc_easy": 0.9074074074074074, - "eval_bench_accuracy_bigbench": 0.3442622950819672, - "eval_bench_accuracy_boolq": 0.5185185185185185, - "eval_bench_accuracy_mmlu": 0.48148148148148145, - "eval_bench_accuracy_openbookqa": 0.14814814814814814, - "eval_bench_accuracy_truthful_qa": 0.37735849056603776, - "eval_bench_accuracy_winogrande": 0.4074074074074074, - "eval_bench_average_accuracy": 0.4763926840717912, - "eval_bench_loss": 5.786159653261484, - "eval_bench_total_accuracy": 0.47283702213279677, - "step": 1 - }, - { - "epoch": 0.0, - "learning_rate": 6.000000000000001e-07, - "loss": 0.8533, - "step": 2 - }, - { - "epoch": 0.0, - "learning_rate": 1.2000000000000002e-06, - "loss": 0.9641, - "step": 3 - }, - { - "epoch": 0.0, - "learning_rate": 1.8e-06, - "loss": 0.8488, - "step": 4 - }, - { - "epoch": 0.0, - "learning_rate": 2.4000000000000003e-06, - "loss": 0.8863, - "step": 5 - }, - { - "epoch": 0.0, - "learning_rate": 3e-06, - "loss": 0.7988, - "step": 6 - }, - { - "epoch": 0.0, - "learning_rate": 3.6e-06, - "loss": 0.7789, - "step": 7 - }, - { - "epoch": 0.0, - "learning_rate": 4.2e-06, - "loss": 0.7144, - "step": 8 - }, - { - "epoch": 0.0, - "learning_rate": 4.800000000000001e-06, - "loss": 0.8322, - "step": 9 - }, - { - "epoch": 0.0, - "learning_rate": 5.4e-06, - "loss": 0.734, - "step": 10 - }, - { - "epoch": 0.0, - "learning_rate": 6e-06, - "loss": 0.7861, - "step": 11 - }, - { - "epoch": 0.0, - "learning_rate": 5.999602806831722e-06, - "loss": 0.6733, - "step": 12 - }, - { - "epoch": 0.0, - "learning_rate": 5.999205613663445e-06, - "loss": 0.7019, - "step": 13 - }, - { - "epoch": 0.0, - "learning_rate": 5.9988084204951675e-06, - "loss": 0.7096, - "step": 14 - }, - { - "epoch": 0.0, - "learning_rate": 5.99841122732689e-06, - "loss": 0.6745, - "step": 15 - }, - { - "epoch": 0.0, - "learning_rate": 5.998014034158613e-06, - "loss": 0.8022, - "step": 16 - }, - { - "epoch": 0.0, - "learning_rate": 5.997616840990336e-06, - "loss": 0.7753, - "step": 17 - }, - { - "epoch": 0.0, - "learning_rate": 5.997219647822058e-06, - "loss": 0.6939, - "step": 18 - }, - { - "epoch": 0.0, - "learning_rate": 5.99682245465378e-06, - "loss": 0.689, - "step": 19 - }, - { - "epoch": 0.0, - "learning_rate": 5.996425261485502e-06, - "loss": 0.7419, - "step": 20 - }, - { - "epoch": 0.0, - "learning_rate": 5.996028068317225e-06, - "loss": 0.6975, - "step": 21 - }, - { - "epoch": 0.0, - "learning_rate": 5.9956308751489475e-06, - "loss": 0.686, - "step": 22 - }, - { - "epoch": 0.0, - "learning_rate": 
5.99523368198067e-06, - "loss": 0.7576, - "step": 23 - }, - { - "epoch": 0.0, - "learning_rate": 5.994836488812393e-06, - "loss": 0.6802, - "step": 24 - }, - { - "epoch": 0.0, - "learning_rate": 5.994439295644115e-06, - "loss": 0.711, - "step": 25 - }, - { - "epoch": 0.0, - "learning_rate": 5.994042102475838e-06, - "loss": 0.6658, - "step": 26 - }, - { - "epoch": 0.0, - "learning_rate": 5.99364490930756e-06, - "loss": 0.685, - "step": 27 - }, - { - "epoch": 0.0, - "learning_rate": 5.993247716139283e-06, - "loss": 0.6881, - "step": 28 - }, - { - "epoch": 0.0, - "learning_rate": 5.992850522971005e-06, - "loss": 0.7066, - "step": 29 - }, - { - "epoch": 0.0, - "learning_rate": 5.992453329802727e-06, - "loss": 0.6993, - "step": 30 - }, - { - "epoch": 0.0, - "learning_rate": 5.99205613663445e-06, - "loss": 0.6429, - "step": 31 - }, - { - "epoch": 0.0, - "learning_rate": 5.991658943466173e-06, - "loss": 0.7205, - "step": 32 - }, - { - "epoch": 0.0, - "learning_rate": 5.991261750297895e-06, - "loss": 0.703, - "step": 33 - }, - { - "epoch": 0.0, - "learning_rate": 5.990864557129617e-06, - "loss": 0.6512, - "step": 34 - }, - { - "epoch": 0.0, - "learning_rate": 5.99046736396134e-06, - "loss": 0.6583, - "step": 35 - }, - { - "epoch": 0.0, - "learning_rate": 5.990070170793063e-06, - "loss": 0.6904, - "step": 36 - }, - { - "epoch": 0.0, - "learning_rate": 5.989672977624785e-06, - "loss": 0.6619, - "step": 37 - }, - { - "epoch": 0.01, - "learning_rate": 5.989275784456507e-06, - "loss": 0.7033, - "step": 38 - }, - { - "epoch": 0.01, - "learning_rate": 5.98887859128823e-06, - "loss": 0.6522, - "step": 39 - }, - { - "epoch": 0.01, - "learning_rate": 5.9884813981199526e-06, - "loss": 0.6365, - "step": 40 - }, - { - "epoch": 0.01, - "learning_rate": 5.988084204951675e-06, - "loss": 0.6422, - "step": 41 - }, - { - "epoch": 0.01, - "learning_rate": 5.987687011783397e-06, - "loss": 0.625, - "step": 42 - }, - { - "epoch": 0.01, - "learning_rate": 5.98728981861512e-06, - "loss": 0.6148, - "step": 43 - }, - { - "epoch": 0.01, - "learning_rate": 5.986892625446843e-06, - "loss": 0.7089, - "step": 44 - }, - { - "epoch": 0.01, - "learning_rate": 5.986495432278565e-06, - "loss": 0.681, - "step": 45 - }, - { - "epoch": 0.01, - "learning_rate": 5.986098239110287e-06, - "loss": 0.6505, - "step": 46 - }, - { - "epoch": 0.01, - "learning_rate": 5.98570104594201e-06, - "loss": 0.6531, - "step": 47 - }, - { - "epoch": 0.01, - "learning_rate": 5.9853038527737325e-06, - "loss": 0.6683, - "step": 48 - }, - { - "epoch": 0.01, - "learning_rate": 5.984906659605455e-06, - "loss": 0.6967, - "step": 49 - }, - { - "epoch": 0.01, - "learning_rate": 5.984509466437178e-06, - "loss": 0.6514, - "step": 50 - } - ], - "logging_steps": 1, - "max_steps": 15116, - "num_train_epochs": 2, - "save_steps": 50, - "total_flos": 2.097021406150656e+17, - "trial_name": null, - "trial_params": null -} diff --git a/ckpt50/training_args.bin b/ckpt50/training_args.bin deleted file mode 100644 index fd48cb2c1f189a745f6dd9ed1b31fb65735ab74f..0000000000000000000000000000000000000000 --- a/ckpt50/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d199aaf2c0faf27119865363ae8807d2a26fe0d807759935113104fe056a8359 -size 5947 diff --git a/ckpt50/zero_to_fp32.py b/ckpt50/zero_to_fp32.py deleted file mode 100755 index c98caae31534368be22b67fc4ae906836c992a8d..0000000000000000000000000000000000000000 --- a/ckpt50/zero_to_fp32.py +++ /dev/null @@ -1,587 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) Microsoft 
Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets -# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in -# the future. Once extracted, the weights don't require DeepSpeed and can be used in any -# application. -# -# example: python zero_to_fp32.py . pytorch_model.bin - -import argparse -import torch -import glob -import math -import os -import re -from collections import OrderedDict -from dataclasses import dataclass - -# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with -# DeepSpeed data structures it has to be available in the current python environment. -from deepspeed.utils import logger -from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, - FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, - FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) - - -@dataclass -class zero_model_state: - buffers: dict() - param_shapes: dict() - shared_params: list - ds_version: int - frozen_param_shapes: dict() - frozen_param_fragments: dict() - - -debug = 0 - -# load to cpu -device = torch.device('cpu') - - -def atoi(text): - return int(text) if text.isdigit() else text - - -def natural_keys(text): - ''' - alist.sort(key=natural_keys) sorts in human order - http://nedbatchelder.com/blog/200712/human_sorting.html - (See Toothy's implementation in the comments) - ''' - return [atoi(c) for c in re.split(r'(\d+)', text)] - - -def get_model_state_file(checkpoint_dir, zero_stage): - if not os.path.isdir(checkpoint_dir): - raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") - - # there should be only one file - if zero_stage <= 2: - file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") - elif zero_stage == 3: - file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") - - if not os.path.exists(file): - raise FileNotFoundError(f"can't find model states file at '{file}'") - - return file - - -def get_checkpoint_files(checkpoint_dir, glob_pattern): - # XXX: need to test that this simple glob rule works for multi-node setup too - ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) - - if len(ckpt_files) == 0: - raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") - - return ckpt_files - - -def get_optim_files(checkpoint_dir): - return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") - - -def get_model_state_files(checkpoint_dir): - return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") - - -def parse_model_states(files): - zero_model_states = [] - for file in files: - state_dict = torch.load(file, map_location=device) - - if BUFFER_NAMES not in state_dict: - raise ValueError(f"{file} is not a model state checkpoint") - buffer_names = state_dict[BUFFER_NAMES] - if debug: - print("Found buffers:", buffer_names) - - # recover just the buffers while restoring them to fp32 if they were saved in fp16 - buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} - param_shapes = state_dict[PARAM_SHAPES] - - # collect parameters that are included in param_shapes - param_names = [] - for s in param_shapes: - for name in s.keys(): - param_names.append(name) - - # update with frozen parameters - frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, 
None) - if frozen_param_shapes is not None: - if debug: - print(f"Found frozen_param_shapes: {frozen_param_shapes}") - param_names += list(frozen_param_shapes.keys()) - - # handle shared params - shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] - - ds_version = state_dict.get(DS_VERSION, None) - - frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) - - z_model_state = zero_model_state(buffers=buffers, - param_shapes=param_shapes, - shared_params=shared_params, - ds_version=ds_version, - frozen_param_shapes=frozen_param_shapes, - frozen_param_fragments=frozen_param_fragments) - zero_model_states.append(z_model_state) - - return zero_model_states - - -def parse_optim_states(files, ds_checkpoint_dir): - - total_files = len(files) - state_dicts = [] - for f in files: - state_dict = torch.load(f, map_location=device) - # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights - # and also handle the case where it was already removed by another helper script - state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) - state_dicts.append(state_dict) - - if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: - raise ValueError(f"{files[0]} is not a zero checkpoint") - zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] - world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] - - # For ZeRO-2 each param group can have different partition_count as data parallelism for expert - # parameters can be different from data parallelism for non-expert parameters. So we can just - # use the max of the partition_count to get the dp world_size. - - if type(world_size) is list: - world_size = max(world_size) - - if world_size != total_files: - raise ValueError( - f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " - "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." 
- ) - - # the groups are named differently in each stage - if zero_stage <= 2: - fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS - elif zero_stage == 3: - fp32_groups_key = FP32_FLAT_GROUPS - else: - raise ValueError(f"unknown zero stage {zero_stage}") - - if zero_stage <= 2: - fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] - elif zero_stage == 3: - # if there is more than one param group, there will be multiple flattened tensors - one - # flattened tensor per group - for simplicity merge them into a single tensor - # - # XXX: could make the script more memory efficient for when there are multiple groups - it - # will require matching the sub-lists of param_shapes for each param group flattened tensor - - fp32_flat_groups = [ - torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts)) - ] - - return zero_stage, world_size, fp32_flat_groups - - -def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir): - """ - Returns fp32 state_dict reconstructed from ds checkpoint - - Args: - - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) - - """ - print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") - - optim_files = get_optim_files(ds_checkpoint_dir) - zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) - print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") - - model_files = get_model_state_files(ds_checkpoint_dir) - - zero_model_states = parse_model_states(model_files) - print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') - - if zero_stage <= 2: - return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states) - elif zero_stage == 3: - return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states) - - -def _zero2_merge_frozen_params(state_dict, zero_model_states): - if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: - return - - frozen_param_shapes = zero_model_states[0].frozen_param_shapes - frozen_param_fragments = zero_model_states[0].frozen_param_fragments - - if debug: - num_elem = sum(s.numel() for s in frozen_param_shapes.values()) - print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') - - wanted_params = len(frozen_param_shapes) - wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) - avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) - print(f'Frozen params: Have {avail_numel} numels to process.') - print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') - - total_params = 0 - total_numel = 0 - for name, shape in frozen_param_shapes.items(): - total_params += 1 - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - - state_dict[name] = frozen_param_fragments[name] - - if debug: - print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") - - print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") - - -def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): - param_shapes = zero_model_states[0].param_shapes - - # Reconstruction protocol: - # - # XXX: document this - - if debug: - for i in range(world_size): - for j in range(len(fp32_flat_groups[0])): - 
print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") - - # XXX: memory usage doubles here (zero2) - num_param_groups = len(fp32_flat_groups[0]) - merged_single_partition_of_fp32_groups = [] - for i in range(num_param_groups): - merged_partitions = [sd[i] for sd in fp32_flat_groups] - full_single_fp32_vector = torch.cat(merged_partitions, 0) - merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) - avail_numel = sum( - [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) - - if debug: - wanted_params = sum([len(shapes) for shapes in param_shapes]) - wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) - # not asserting if there is a mismatch due to possible padding - print(f"Have {avail_numel} numels to process.") - print(f"Need {wanted_numel} numels in {wanted_params} params.") - - # params - # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support - # out-of-core computing solution - total_numel = 0 - total_params = 0 - for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): - offset = 0 - avail_numel = full_single_fp32_vector.numel() - for name, shape in shapes.items(): - - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - total_params += 1 - - if debug: - print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") - state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) - offset += unpartitioned_numel - - # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and - # avail_numel can differ by anywhere between 0..2*world_size. 
Due to two unrelated complex - # paddings performed in the code it's almost impossible to predict the exact numbers w/o the - # live optimizer object, so we are checking that the numbers are within the right range - align_to = 2 * world_size - - def zero2_align(x): - return align_to * math.ceil(x / align_to) - - if debug: - print(f"original offset={offset}, avail_numel={avail_numel}") - - offset = zero2_align(offset) - avail_numel = zero2_align(avail_numel) - - if debug: - print(f"aligned offset={offset}, avail_numel={avail_numel}") - - # Sanity check - if offset != avail_numel: - raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") - - print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") - - -def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states): - state_dict = OrderedDict() - - # buffers - buffers = zero_model_states[0].buffers - state_dict.update(buffers) - if debug: - print(f"added {len(buffers)} buffers") - - _zero2_merge_frozen_params(state_dict, zero_model_states) - - _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) - - # recover shared parameters - for pair in zero_model_states[0].shared_params: - if pair[1] in state_dict: - state_dict[pair[0]] = state_dict[pair[1]] - - return state_dict - - -def zero3_partitioned_param_info(unpartitioned_numel, world_size): - remainder = unpartitioned_numel % world_size - padding_numel = (world_size - remainder) if remainder else 0 - partitioned_numel = math.ceil(unpartitioned_numel / world_size) - return partitioned_numel, padding_numel - - -def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): - if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: - return - - if debug: - for i in range(world_size): - num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) - print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') - - frozen_param_shapes = zero_model_states[0].frozen_param_shapes - wanted_params = len(frozen_param_shapes) - wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) - avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size - print(f'Frozen params: Have {avail_numel} numels to process.') - print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') - - total_params = 0 - total_numel = 0 - for name, shape in zero_model_states[0].frozen_param_shapes.items(): - total_params += 1 - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - - param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) - state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) - - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print( - f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") - - -def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): - param_shapes = zero_model_states[0].param_shapes - avail_numel = fp32_flat_groups[0].numel() * world_size - # Reconstruction protocol: For zero3 we need to zip the 
partitions together at boundary of each - # param, re-consolidating each param, while dealing with padding if any - - # merge list of dicts, preserving order - param_shapes = {k: v for d in param_shapes for k, v in d.items()} - - if debug: - for i in range(world_size): - print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") - - wanted_params = len(param_shapes) - wanted_numel = sum(shape.numel() for shape in param_shapes.values()) - # not asserting if there is a mismatch due to possible padding - avail_numel = fp32_flat_groups[0].numel() * world_size - print(f"Trainable params: Have {avail_numel} numels to process.") - print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") - - # params - # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support - # out-of-core computing solution - offset = 0 - total_numel = 0 - total_params = 0 - for name, shape in param_shapes.items(): - - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - total_params += 1 - - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print( - f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - # XXX: memory usage doubles here - state_dict[name] = torch.cat( - tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)), - 0).narrow(0, 0, unpartitioned_numel).view(shape) - offset += partitioned_numel - - offset *= world_size - - # Sanity check - if offset != avail_numel: - raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") - - print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") - - -def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states): - state_dict = OrderedDict() - - # buffers - buffers = zero_model_states[0].buffers - state_dict.update(buffers) - if debug: - print(f"added {len(buffers)} buffers") - - _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) - - _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) - - # recover shared parameters - for pair in zero_model_states[0].shared_params: - if pair[1] in state_dict: - state_dict[pair[0]] = state_dict[pair[1]] - - return state_dict - - -def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None): - """ - Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with - ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example - via a model hub. - - Args: - - ``checkpoint_dir``: path to the desired checkpoint folder - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` - - Returns: - - pytorch ``state_dict`` - - Note: this approach may not work if your application doesn't have sufficient free CPU memory and - you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with - the checkpoint. 
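The per-rank slice arithmetic used by the ZeRO-3 merge above is handled by `zero3_partitioned_param_info`: every rank stores `ceil(numel / world_size)` elements of a flattened parameter, with padding appended so the tensor divides evenly. A standalone worked check of that arithmetic (reimplemented here rather than imported from the script):

```python
import math

def zero3_partitioned_param_info(unpartitioned_numel: int, world_size: int):
    # Same arithmetic as in the script: per-rank slice size, plus the
    # padding needed so world_size * slice covers the full tensor.
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel

# 10 elements across 4 ranks: each rank stores 3 elements, and 2 of the
# 12 stored elements are padding (3 * 4 == 10 + 2).
assert zero3_partitioned_param_info(10, 4) == (3, 2)
# Evenly divisible tensors need no padding.
assert zero3_partitioned_param_info(8, 4) == (2, 0)
```
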
- - A typical usage might be :: - - from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint - # do the training and checkpoint saving - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu - model = model.cpu() # move to cpu - model.load_state_dict(state_dict) - # submit to model hub or save the model to share with others - - In this example the ``model`` will no longer be usable in the deepspeed context of the same - application. i.e. you will need to re-initialize the deepspeed engine, since - ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. - - If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. - - """ - if tag is None: - latest_path = os.path.join(checkpoint_dir, 'latest') - if os.path.isfile(latest_path): - with open(latest_path, 'r') as fd: - tag = fd.read().strip() - else: - raise ValueError(f"Unable to find 'latest' file at {latest_path}") - - ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) - - if not os.path.isdir(ds_checkpoint_dir): - raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") - - return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir) - - -def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None): - """ - Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be - loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. - - Args: - - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` - """ - - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) - print(f"Saving fp32 state dict to {output_file}") - torch.save(state_dict, output_file) - - -def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): - """ - 1. Put the provided model to cpu - 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` - 3. Load it into the provided model - - Args: - - ``model``: the model object to update - - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` - - Returns: - - ``model`: modified model - - Make sure you have plenty of CPU memory available before you call this function. If you don't - have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it - conveniently placed for you in the checkpoint folder. - - A typical usage might be :: - - from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint - model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) - # submit to model hub or save the model to share with others - - Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context - of the same application. i.e. you will need to re-initialize the deepspeed engine, since - ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. 
- - """ - logger.info(f"Extracting fp32 weights") - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) - - logger.info(f"Overwriting model with fp32 weights") - model = model.cpu() - model.load_state_dict(state_dict, strict=False) - - return model - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser() - parser.add_argument("checkpoint_dir", - type=str, - help="path to the desired checkpoint folder, e.g., path/checkpoint-12") - parser.add_argument( - "output_file", - type=str, - help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)") - parser.add_argument("-t", - "--tag", - type=str, - default=None, - help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") - parser.add_argument("-d", "--debug", action='store_true', help="enable debug") - args = parser.parse_args() - - debug = args.debug - - convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag) diff --git a/latest b/latest deleted file mode 100644 index 9b4dc801e3fb152ef5c0ee60d309c705a9b01564..0000000000000000000000000000000000000000 --- a/latest +++ /dev/null @@ -1 +0,0 @@ -global_step50 \ No newline at end of file diff --git a/rng_state_1.pth b/rng_state_1.pth deleted file mode 100644 index 6b2ef88173fde17f2b3e738a28446f89a0528a96..0000000000000000000000000000000000000000 --- a/rng_state_1.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7e34eb456d2d003a2839f2daa9425e99bdd79ed7e24a1de9fc7d5738476bfb4b -size 17655 diff --git a/rng_state_2.pth b/rng_state_2.pth deleted file mode 100644 index 7b118d52a3006aea6c44f23f94c5568d1fb0a2f3..0000000000000000000000000000000000000000 --- a/rng_state_2.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b374af4a2765d8771cee7a72921d3c2e438b9bee34f0b2d098ce6071afeb65e4 -size 17655 diff --git a/rng_state_3.pth b/rng_state_3.pth deleted file mode 100644 index 3f6fd9aa58eb1d5815ca991134531a3280601900..0000000000000000000000000000000000000000 --- a/rng_state_3.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5df75d8477fcc69c7abb03025313915ebfe3ac18c54a7c57aaa455c0099e13e5 -size 17655 diff --git a/zero_to_fp32.py b/zero_to_fp32.py deleted file mode 100755 index c98caae31534368be22b67fc4ae906836c992a8d..0000000000000000000000000000000000000000 --- a/zero_to_fp32.py +++ /dev/null @@ -1,587 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets -# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in -# the future. Once extracted, the weights don't require DeepSpeed and can be used in any -# application. -# -# example: python zero_to_fp32.py . pytorch_model.bin - -import argparse -import torch -import glob -import math -import os -import re -from collections import OrderedDict -from dataclasses import dataclass - -# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with -# DeepSpeed data structures it has to be available in the current python environment. 
-from deepspeed.utils import logger -from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, - FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, - FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) - - -@dataclass -class zero_model_state: - buffers: dict() - param_shapes: dict() - shared_params: list - ds_version: int - frozen_param_shapes: dict() - frozen_param_fragments: dict() - - -debug = 0 - -# load to cpu -device = torch.device('cpu') - - -def atoi(text): - return int(text) if text.isdigit() else text - - -def natural_keys(text): - ''' - alist.sort(key=natural_keys) sorts in human order - http://nedbatchelder.com/blog/200712/human_sorting.html - (See Toothy's implementation in the comments) - ''' - return [atoi(c) for c in re.split(r'(\d+)', text)] - - -def get_model_state_file(checkpoint_dir, zero_stage): - if not os.path.isdir(checkpoint_dir): - raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") - - # there should be only one file - if zero_stage <= 2: - file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") - elif zero_stage == 3: - file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") - - if not os.path.exists(file): - raise FileNotFoundError(f"can't find model states file at '{file}'") - - return file - - -def get_checkpoint_files(checkpoint_dir, glob_pattern): - # XXX: need to test that this simple glob rule works for multi-node setup too - ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) - - if len(ckpt_files) == 0: - raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") - - return ckpt_files - - -def get_optim_files(checkpoint_dir): - return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") - - -def get_model_state_files(checkpoint_dir): - return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") - - -def parse_model_states(files): - zero_model_states = [] - for file in files: - state_dict = torch.load(file, map_location=device) - - if BUFFER_NAMES not in state_dict: - raise ValueError(f"{file} is not a model state checkpoint") - buffer_names = state_dict[BUFFER_NAMES] - if debug: - print("Found buffers:", buffer_names) - - # recover just the buffers while restoring them to fp32 if they were saved in fp16 - buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} - param_shapes = state_dict[PARAM_SHAPES] - - # collect parameters that are included in param_shapes - param_names = [] - for s in param_shapes: - for name in s.keys(): - param_names.append(name) - - # update with frozen parameters - frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) - if frozen_param_shapes is not None: - if debug: - print(f"Found frozen_param_shapes: {frozen_param_shapes}") - param_names += list(frozen_param_shapes.keys()) - - # handle shared params - shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] - - ds_version = state_dict.get(DS_VERSION, None) - - frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) - - z_model_state = zero_model_state(buffers=buffers, - param_shapes=param_shapes, - shared_params=shared_params, - ds_version=ds_version, - frozen_param_shapes=frozen_param_shapes, - frozen_param_fragments=frozen_param_fragments) - zero_model_states.append(z_model_state) - - return zero_model_states - - -def parse_optim_states(files, ds_checkpoint_dir): - - total_files = len(files) - 
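The file lists gathered here were sorted with the `natural_keys` helper defined above, which splits digit runs out of each name so ranks compare numerically; plain lexicographic sorting would interleave ranks once the world size passes 10. A quick demonstration, using hypothetical per-rank filenames matching the script's `*_optim_states.pt` glob:

```python
import re

def atoi(text):
    return int(text) if text.isdigit() else text

def natural_keys(text):
    # Split digit runs out of the string so numbers compare as integers.
    return [atoi(c) for c in re.split(r'(\d+)', text)]

# Hypothetical per-rank optimizer-state filenames.
files = [f"zero_pp_rank_{r}_mp_rank_00_optim_states.pt" for r in (10, 2, 0, 1)]
print(sorted(files))                    # ..._0, ..._1, ..._10, ..._2  (wrong order)
print(sorted(files, key=natural_keys))  # ..._0, ..._1, ..._2, ..._10
```
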
state_dicts = [] - for f in files: - state_dict = torch.load(f, map_location=device) - # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights - # and also handle the case where it was already removed by another helper script - state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) - state_dicts.append(state_dict) - - if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: - raise ValueError(f"{files[0]} is not a zero checkpoint") - zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] - world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] - - # For ZeRO-2 each param group can have different partition_count as data parallelism for expert - # parameters can be different from data parallelism for non-expert parameters. So we can just - # use the max of the partition_count to get the dp world_size. - - if type(world_size) is list: - world_size = max(world_size) - - if world_size != total_files: - raise ValueError( - f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " - "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." - ) - - # the groups are named differently in each stage - if zero_stage <= 2: - fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS - elif zero_stage == 3: - fp32_groups_key = FP32_FLAT_GROUPS - else: - raise ValueError(f"unknown zero stage {zero_stage}") - - if zero_stage <= 2: - fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] - elif zero_stage == 3: - # if there is more than one param group, there will be multiple flattened tensors - one - # flattened tensor per group - for simplicity merge them into a single tensor - # - # XXX: could make the script more memory efficient for when there are multiple groups - it - # will require matching the sub-lists of param_shapes for each param group flattened tensor - - fp32_flat_groups = [ - torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts)) - ] - - return zero_stage, world_size, fp32_flat_groups - - -def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir): - """ - Returns fp32 state_dict reconstructed from ds checkpoint - - Args: - - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) - - """ - print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") - - optim_files = get_optim_files(ds_checkpoint_dir) - zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) - print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") - - model_files = get_model_state_files(ds_checkpoint_dir) - - zero_model_states = parse_model_states(model_files) - print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') - - if zero_stage <= 2: - return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states) - elif zero_stage == 3: - return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states) - - -def _zero2_merge_frozen_params(state_dict, zero_model_states): - if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: - return - - frozen_param_shapes = zero_model_states[0].frozen_param_shapes - frozen_param_fragments = zero_model_states[0].frozen_param_fragments - - if debug: - num_elem = 
sum(s.numel() for s in frozen_param_shapes.values()) - print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') - - wanted_params = len(frozen_param_shapes) - wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) - avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) - print(f'Frozen params: Have {avail_numel} numels to process.') - print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') - - total_params = 0 - total_numel = 0 - for name, shape in frozen_param_shapes.items(): - total_params += 1 - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - - state_dict[name] = frozen_param_fragments[name] - - if debug: - print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") - - print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") - - -def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): - param_shapes = zero_model_states[0].param_shapes - - # Reconstruction protocol: - # - # XXX: document this - - if debug: - for i in range(world_size): - for j in range(len(fp32_flat_groups[0])): - print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") - - # XXX: memory usage doubles here (zero2) - num_param_groups = len(fp32_flat_groups[0]) - merged_single_partition_of_fp32_groups = [] - for i in range(num_param_groups): - merged_partitions = [sd[i] for sd in fp32_flat_groups] - full_single_fp32_vector = torch.cat(merged_partitions, 0) - merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) - avail_numel = sum( - [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) - - if debug: - wanted_params = sum([len(shapes) for shapes in param_shapes]) - wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) - # not asserting if there is a mismatch due to possible padding - print(f"Have {avail_numel} numels to process.") - print(f"Need {wanted_numel} numels in {wanted_params} params.") - - # params - # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support - # out-of-core computing solution - total_numel = 0 - total_params = 0 - for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): - offset = 0 - avail_numel = full_single_fp32_vector.numel() - for name, shape in shapes.items(): - - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - total_params += 1 - - if debug: - print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") - state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) - offset += unpartitioned_numel - - # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and - # avail_numel can differ by anywhere between 0..2*world_size. 
Due to two unrelated complex - # paddings performed in the code it's almost impossible to predict the exact numbers w/o the - # live optimizer object, so we are checking that the numbers are within the right range - align_to = 2 * world_size - - def zero2_align(x): - return align_to * math.ceil(x / align_to) - - if debug: - print(f"original offset={offset}, avail_numel={avail_numel}") - - offset = zero2_align(offset) - avail_numel = zero2_align(avail_numel) - - if debug: - print(f"aligned offset={offset}, avail_numel={avail_numel}") - - # Sanity check - if offset != avail_numel: - raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") - - print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") - - -def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states): - state_dict = OrderedDict() - - # buffers - buffers = zero_model_states[0].buffers - state_dict.update(buffers) - if debug: - print(f"added {len(buffers)} buffers") - - _zero2_merge_frozen_params(state_dict, zero_model_states) - - _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) - - # recover shared parameters - for pair in zero_model_states[0].shared_params: - if pair[1] in state_dict: - state_dict[pair[0]] = state_dict[pair[1]] - - return state_dict - - -def zero3_partitioned_param_info(unpartitioned_numel, world_size): - remainder = unpartitioned_numel % world_size - padding_numel = (world_size - remainder) if remainder else 0 - partitioned_numel = math.ceil(unpartitioned_numel / world_size) - return partitioned_numel, padding_numel - - -def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): - if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: - return - - if debug: - for i in range(world_size): - num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) - print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') - - frozen_param_shapes = zero_model_states[0].frozen_param_shapes - wanted_params = len(frozen_param_shapes) - wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) - avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size - print(f'Frozen params: Have {avail_numel} numels to process.') - print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') - - total_params = 0 - total_numel = 0 - for name, shape in zero_model_states[0].frozen_param_shapes.items(): - total_params += 1 - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - - param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) - state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) - - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print( - f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") - - -def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): - param_shapes = zero_model_states[0].param_shapes - avail_numel = fp32_flat_groups[0].numel() * world_size - # Reconstruction protocol: For zero3 we need to zip the 
partitions together at boundary of each - # param, re-consolidating each param, while dealing with padding if any - - # merge list of dicts, preserving order - param_shapes = {k: v for d in param_shapes for k, v in d.items()} - - if debug: - for i in range(world_size): - print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") - - wanted_params = len(param_shapes) - wanted_numel = sum(shape.numel() for shape in param_shapes.values()) - # not asserting if there is a mismatch due to possible padding - avail_numel = fp32_flat_groups[0].numel() * world_size - print(f"Trainable params: Have {avail_numel} numels to process.") - print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") - - # params - # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support - # out-of-core computing solution - offset = 0 - total_numel = 0 - total_params = 0 - for name, shape in param_shapes.items(): - - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - total_params += 1 - - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print( - f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - # XXX: memory usage doubles here - state_dict[name] = torch.cat( - tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)), - 0).narrow(0, 0, unpartitioned_numel).view(shape) - offset += partitioned_numel - - offset *= world_size - - # Sanity check - if offset != avail_numel: - raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") - - print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") - - -def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states): - state_dict = OrderedDict() - - # buffers - buffers = zero_model_states[0].buffers - state_dict.update(buffers) - if debug: - print(f"added {len(buffers)} buffers") - - _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) - - _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) - - # recover shared parameters - for pair in zero_model_states[0].shared_params: - if pair[1] in state_dict: - state_dict[pair[0]] = state_dict[pair[1]] - - return state_dict - - -def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None): - """ - Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with - ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example - via a model hub. - - Args: - - ``checkpoint_dir``: path to the desired checkpoint folder - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` - - Returns: - - pytorch ``state_dict`` - - Note: this approach may not work if your application doesn't have sufficient free CPU memory and - you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with - the checkpoint. 
- - A typical usage might be :: - - from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint - # do the training and checkpoint saving - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu - model = model.cpu() # move to cpu - model.load_state_dict(state_dict) - # submit to model hub or save the model to share with others - - In this example the ``model`` will no longer be usable in the deepspeed context of the same - application. i.e. you will need to re-initialize the deepspeed engine, since - ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. - - If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. - - """ - if tag is None: - latest_path = os.path.join(checkpoint_dir, 'latest') - if os.path.isfile(latest_path): - with open(latest_path, 'r') as fd: - tag = fd.read().strip() - else: - raise ValueError(f"Unable to find 'latest' file at {latest_path}") - - ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) - - if not os.path.isdir(ds_checkpoint_dir): - raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") - - return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir) - - -def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None): - """ - Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be - loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. - - Args: - - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` - """ - - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) - print(f"Saving fp32 state dict to {output_file}") - torch.save(state_dict, output_file) - - -def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): - """ - 1. Put the provided model to cpu - 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` - 3. Load it into the provided model - - Args: - - ``model``: the model object to update - - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` - - Returns: - - ``model`: modified model - - Make sure you have plenty of CPU memory available before you call this function. If you don't - have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it - conveniently placed for you in the checkpoint folder. - - A typical usage might be :: - - from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint - model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) - # submit to model hub or save the model to share with others - - Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context - of the same application. i.e. you will need to re-initialize the deepspeed engine, since - ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. 
- - """ - logger.info(f"Extracting fp32 weights") - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) - - logger.info(f"Overwriting model with fp32 weights") - model = model.cpu() - model.load_state_dict(state_dict, strict=False) - - return model - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser() - parser.add_argument("checkpoint_dir", - type=str, - help="path to the desired checkpoint folder, e.g., path/checkpoint-12") - parser.add_argument( - "output_file", - type=str, - help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)") - parser.add_argument("-t", - "--tag", - type=str, - default=None, - help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") - parser.add_argument("-d", "--debug", action='store_true', help="enable debug") - args = parser.parse_args() - - debug = args.debug - - convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)