davidanugraha committed
Commit e55ea03 · verified · 1 Parent(s): 2b69587

Upload folder using huggingface_hub

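The commit message above matches huggingface_hub's folder-upload workflow. As a minimal sketch of how a commit like this one is typically produced (the local path and repo id below are placeholders, not taken from this page):

```python
# Sketch: create a commit like this one with huggingface_hub.
# folder_path and repo_id are placeholders.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default
api.upload_folder(
    folder_path="./checkpoint",
    repo_id="user/model-repo",
    commit_message="Upload folder using huggingface_hub",
)
```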
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "</think>": 151668,
+ "</tool_call>": 151658,
+ "</tool_response>": 151666,
+ "<think>": 151667,
+ "<tool_call>": 151657,
+ "<tool_response>": 151665,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
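As a quick sanity check, these ids should round-trip through the tokenizer shipped in this commit; a minimal sketch (the repo id is a placeholder):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("user/model-repo")  # placeholder repo id
# Token string -> id and id -> token string, per added_tokens.json above.
assert tok.convert_tokens_to_ids("<think>") == 151667
assert tok.convert_ids_to_tokens(151645) == "<|im_end|>"
```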
chat_template.jinja ADDED
@@ -0,0 +1,89 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0].role == 'system' %}
+ {{- messages[0].content + '\n\n' }}
+ {%- endif %}
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0].role == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+ {%- for message in messages[::-1] %}
+ {%- set index = (messages|length - 1) - loop.index0 %}
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
+ {%- set ns.multi_step_tool = false %}
+ {%- set ns.last_query_index = index %}
+ {%- endif %}
+ {%- endfor %}
+ {%- for message in messages %}
+ {%- if message.content is string %}
+ {%- set content = message.content %}
+ {%- else %}
+ {%- set content = '' %}
+ {%- endif %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {%- set reasoning_content = '' %}
+ {%- if message.reasoning_content is string %}
+ {%- set reasoning_content = message.reasoning_content %}
+ {%- else %}
+ {%- if '</think>' in content %}
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {%- if loop.index0 > ns.last_query_index %}
+ {%- if loop.last or (not loop.last and reasoning_content) %}
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- if message.tool_calls %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if (loop.first and content) or (not loop.first) %}
+ {{- '\n' }}
+ {%- endif %}
+ {%- if tool_call.function %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {%- if tool_call.arguments is string %}
+ {{- tool_call.arguments }}
+ {%- else %}
+ {{- tool_call.arguments | tojson }}
+ {%- endif %}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- if enable_thinking is defined and enable_thinking is false %}
+ {{- '<think>\n\n</think>\n\n' }}
+ {%- endif %}
+ {%- endif %}
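The template renders ChatML-style turns, strips <think>...</think> reasoning out of earlier assistant turns, and emits an empty think block when enable_thinking is false. A minimal sketch of exercising it (placeholder repo id; this assumes a recent transformers version, which forwards extra keyword arguments such as enable_thinking to the template):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("user/model-repo")  # placeholder
messages = [{"role": "user", "content": "What is 2 + 2?"}]
text = tok.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
    enable_thinking=False,  # template then emits an empty <think></think> block
)
print(text)  # ends with "<|im_start|>assistant\n<think>\n\n</think>\n\n"
```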
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "architectures": [
+ "Qwen3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "eos_token_id": 151645,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2560,
+ "initializer_range": 0.02,
+ "intermediate_size": 9728,
+ "max_position_embeddings": 40960,
+ "max_window_layers": 36,
+ "model_type": "qwen3",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 36,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.52.4",
+ "use_cache": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ }
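The config describes a 36-layer Qwen3 model with grouped-query attention (32 query heads sharing 8 key/value heads) and tied embeddings. A minimal sketch of reading it back (placeholder repo id):

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("user/model-repo")      # placeholder
print(cfg.model_type)                                    # qwen3
print(cfg.num_hidden_layers, cfg.hidden_size)            # 36 2560
print(cfg.num_attention_heads, cfg.num_key_value_heads)  # 32 8 (GQA)
```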
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "bos_token_id": 151643,
+ "do_sample": true,
+ "eos_token_id": [
+ 151645,
+ 151643
+ ],
+ "pad_token_id": 151643,
+ "temperature": 0.6,
+ "top_k": 20,
+ "top_p": 0.95,
+ "transformers_version": "4.52.4"
+ }
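Because this file ships with the checkpoint, model.generate() picks up do_sample=True, temperature=0.6, top_k=20, top_p=0.95 automatically. A minimal sketch (placeholder repo id):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("user/model-repo")  # placeholder
model = AutoModelForCausalLM.from_pretrained(
    "user/model-repo", torch_dtype=torch.bfloat16, device_map="auto"
)
inputs = tok("Hello", return_tensors="pt").to(model.device)
# Sampling settings come from generation_config.json; nothing to respecify.
out = model.generate(**inputs, max_new_tokens=64)
print(tok.decode(out[0], skip_special_tokens=True))
```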
latest ADDED
@@ -0,0 +1 @@
+ global_step270
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b997082106e78eaed460b93d874cc4baab4aa514ba7eadaee385d63d81c70f77
+ size 4967215360
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9c21a8df166f2ab99c72a436ddf22a2fe96c5f4e5aee21c8b2a8fc882e78b79
+ size 3077766632
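These three-line stubs are Git LFS pointer files: the actual shards live in LFS storage and are identified by their sha256 oid. A downloaded shard can be checked against the pointer like this (local file name assumed):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so a multi-GB shard never loads into memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            h.update(block)
    return h.hexdigest()

expected = "e9c21a8df166f2ab99c72a436ddf22a2fe96c5f4e5aee21c8b2a8fc882e78b79"
assert sha256_of("model-00002-of-00002.safetensors") == expected
```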
model.safetensors.index.json ADDED
@@ -0,0 +1,405 @@
+ {
+ "metadata": {
+ "total_size": 8044936192
+ },
+ "weight_map": {
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.20.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.28.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.28.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.29.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.29.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.30.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.30.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.31.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.31.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.32.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.32.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.32.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.32.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.32.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.32.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.32.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.32.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.32.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.32.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.32.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.33.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.33.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.33.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.33.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.33.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.33.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.33.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.33.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.33.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.33.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.33.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.34.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.34.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.34.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.34.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.34.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.34.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.34.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.34.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.34.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.34.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.34.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.35.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.35.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.35.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.35.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.35.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.35.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.35.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.35.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.35.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.35.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.35.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+ }
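The weight_map tells a loader which shard holds each tensor; from_pretrained consumes it automatically, but a manual lookup is straightforward. A minimal sketch, assuming the index and shard files are present locally:

```python
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.norm.weight"
shard = index["weight_map"][name]  # "model-00002-of-00002.safetensors"
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)
print(tensor.shape)  # torch.Size([2560]), matching hidden_size above
```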
rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92cc13315f24c28015d695b6cde08bb1cd6fea4cbc435998485ed6fbe4c91285
+ size 15024
rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f4c154b6a63e0b1f98f7d2847944398f99f1657d35e8eddf7fdf0ae2c24b0552
+ size 15024
rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f784c6a9507b51189f2caffbd178ea9882103b75852e31c15f47fdae6a43af1d
+ size 15024
rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34b023e05bc2d12b91dc436d4922b990d50ec8dc56d40dc3e36b3bb34fc81341
+ size 15024
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53f3e116edc7a4aaa795aa9d3305091b5d05b0faddc16441ba43419650b2148e
+ size 1064
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
+ size 11422654
tokenizer_config.json ADDED
@@ -0,0 +1,240 @@
+ {
+ "add_bos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151646": {
+ "content": "<|object_ref_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|object_ref_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151648": {
+ "content": "<|box_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151649": {
+ "content": "<|box_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151665": {
+ "content": "<tool_response>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151666": {
+ "content": "</tool_response>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151667": {
+ "content": "<think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151668": {
+ "content": "</think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "bos_token": null,
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "extra_special_tokens": {},
+ "model_max_length": 131072,
+ "pad_token": "<|endoftext|>",
+ "padding_side": "right",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
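Note that the "special": true entries above are dropped by skip_special_tokens on decode, while the "special": false ones (e.g. <think>) survive; a minimal sketch (placeholder repo id):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("user/model-repo")  # placeholder
ids = tok("<|im_start|>user\nhi<|im_end|>")["input_ids"]
print(tok.decode(ids, skip_special_tokens=True))   # "user\nhi"
print(tok.decode(ids, skip_special_tokens=False))  # markers kept
```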
trainer_state.json ADDED
@@ -0,0 +1,1931 @@
+ {
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.0,
+ "eval_steps": 500,
+ "global_step": 271,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0037011334721258385,
+ "grad_norm": 18.58395767211914,
+ "learning_rate": 0.0,
+ "loss": 2.0386,
+ "step": 1
+ },
+ {
+ "epoch": 0.007402266944251677,
+ "grad_norm": 18.265050888061523,
+ "learning_rate": 2.439024390243903e-07,
+ "loss": 2.0567,
+ "step": 2
+ },
+ {
+ "epoch": 0.011103400416377515,
+ "grad_norm": 17.878551483154297,
+ "learning_rate": 4.878048780487805e-07,
+ "loss": 2.0552,
+ "step": 3
+ },
+ {
+ "epoch": 0.014804533888503354,
+ "grad_norm": 16.966453552246094,
+ "learning_rate": 7.317073170731707e-07,
+ "loss": 2.0109,
+ "step": 4
+ },
+ {
+ "epoch": 0.018505667360629193,
+ "grad_norm": 16.161401748657227,
+ "learning_rate": 9.75609756097561e-07,
+ "loss": 1.9672,
+ "step": 5
+ },
+ {
+ "epoch": 0.02220680083275503,
+ "grad_norm": 15.977458953857422,
+ "learning_rate": 1.2195121951219514e-06,
+ "loss": 1.9682,
+ "step": 6
+ },
+ {
+ "epoch": 0.02590793430488087,
+ "grad_norm": 16.708574295043945,
+ "learning_rate": 1.4634146341463414e-06,
+ "loss": 2.0057,
+ "step": 7
+ },
+ {
+ "epoch": 0.02960906777700671,
+ "grad_norm": 14.177475929260254,
+ "learning_rate": 1.707317073170732e-06,
+ "loss": 1.9461,
+ "step": 8
+ },
+ {
+ "epoch": 0.033310201249132546,
+ "grad_norm": 12.158246994018555,
+ "learning_rate": 1.951219512195122e-06,
+ "loss": 1.9066,
+ "step": 9
+ },
+ {
+ "epoch": 0.037011334721258386,
+ "grad_norm": 8.045342445373535,
+ "learning_rate": 2.1951219512195125e-06,
+ "loss": 1.8056,
+ "step": 10
+ },
+ {
+ "epoch": 0.04071246819338423,
+ "grad_norm": 7.844163417816162,
+ "learning_rate": 2.4390243902439027e-06,
+ "loss": 1.8378,
+ "step": 11
+ },
+ {
+ "epoch": 0.04441360166551006,
+ "grad_norm": 7.03815221786499,
+ "learning_rate": 2.682926829268293e-06,
+ "loss": 1.8033,
+ "step": 12
+ },
+ {
+ "epoch": 0.0481147351376359,
+ "grad_norm": 3.075176239013672,
+ "learning_rate": 2.926829268292683e-06,
+ "loss": 1.6791,
+ "step": 13
+ },
+ {
+ "epoch": 0.05181586860976174,
+ "grad_norm": 2.8847968578338623,
+ "learning_rate": 3.1707317073170736e-06,
+ "loss": 1.669,
+ "step": 14
+ },
+ {
+ "epoch": 0.055517002081887576,
+ "grad_norm": 2.814876079559326,
+ "learning_rate": 3.414634146341464e-06,
+ "loss": 1.6593,
+ "step": 15
+ },
+ {
+ "epoch": 0.05921813555401342,
+ "grad_norm": 2.5315568447113037,
+ "learning_rate": 3.6585365853658537e-06,
+ "loss": 1.6243,
+ "step": 16
+ },
+ {
+ "epoch": 0.06291926902613926,
+ "grad_norm": 2.201308488845825,
+ "learning_rate": 3.902439024390244e-06,
+ "loss": 1.635,
+ "step": 17
+ },
+ {
+ "epoch": 0.06662040249826509,
+ "grad_norm": 3.451822280883789,
+ "learning_rate": 4.146341463414634e-06,
+ "loss": 1.5684,
+ "step": 18
+ },
+ {
+ "epoch": 0.07032153597039094,
+ "grad_norm": 3.2633132934570312,
+ "learning_rate": 4.390243902439025e-06,
+ "loss": 1.5289,
+ "step": 19
+ },
+ {
+ "epoch": 0.07402266944251677,
+ "grad_norm": 2.946645736694336,
+ "learning_rate": 4.634146341463416e-06,
+ "loss": 1.5374,
+ "step": 20
+ },
+ {
+ "epoch": 0.0777238029146426,
+ "grad_norm": 2.211390256881714,
+ "learning_rate": 4.8780487804878055e-06,
+ "loss": 1.5406,
+ "step": 21
+ },
+ {
+ "epoch": 0.08142493638676845,
+ "grad_norm": 1.769932746887207,
+ "learning_rate": 5.121951219512195e-06,
+ "loss": 1.5043,
+ "step": 22
+ },
+ {
+ "epoch": 0.08512606985889429,
+ "grad_norm": 1.4464064836502075,
+ "learning_rate": 5.365853658536586e-06,
+ "loss": 1.5019,
+ "step": 23
+ },
+ {
+ "epoch": 0.08882720333102012,
+ "grad_norm": 1.2041360139846802,
+ "learning_rate": 5.609756097560977e-06,
+ "loss": 1.4461,
+ "step": 24
+ },
+ {
+ "epoch": 0.09252833680314597,
+ "grad_norm": 1.348673701286316,
+ "learning_rate": 5.853658536585366e-06,
+ "loss": 1.4275,
+ "step": 25
+ },
+ {
+ "epoch": 0.0962294702752718,
+ "grad_norm": 1.339782476425171,
+ "learning_rate": 6.0975609756097564e-06,
+ "loss": 1.463,
+ "step": 26
+ },
+ {
+ "epoch": 0.09993060374739764,
+ "grad_norm": 1.1694879531860352,
+ "learning_rate": 6.341463414634147e-06,
+ "loss": 1.3685,
+ "step": 27
+ },
+ {
+ "epoch": 0.10363173721952348,
+ "grad_norm": 0.9541494250297546,
+ "learning_rate": 6.585365853658538e-06,
+ "loss": 1.3661,
+ "step": 28
+ },
+ {
+ "epoch": 0.10733287069164932,
+ "grad_norm": 0.7883294224739075,
+ "learning_rate": 6.829268292682928e-06,
+ "loss": 1.3699,
+ "step": 29
+ },
+ {
+ "epoch": 0.11103400416377515,
+ "grad_norm": 0.7608742117881775,
+ "learning_rate": 7.0731707317073175e-06,
+ "loss": 1.3642,
+ "step": 30
+ },
+ {
+ "epoch": 0.114735137635901,
+ "grad_norm": 0.7347314953804016,
+ "learning_rate": 7.317073170731707e-06,
+ "loss": 1.2965,
+ "step": 31
+ },
+ {
+ "epoch": 0.11843627110802683,
+ "grad_norm": 0.7450265288352966,
+ "learning_rate": 7.560975609756098e-06,
+ "loss": 1.334,
+ "step": 32
+ },
+ {
+ "epoch": 0.12213740458015267,
+ "grad_norm": 0.7191912531852722,
+ "learning_rate": 7.804878048780489e-06,
+ "loss": 1.3671,
+ "step": 33
+ },
+ {
+ "epoch": 0.12583853805227851,
+ "grad_norm": 0.6597228646278381,
+ "learning_rate": 8.048780487804879e-06,
+ "loss": 1.3567,
+ "step": 34
+ },
+ {
+ "epoch": 0.12953967152440435,
+ "grad_norm": 0.6044723391532898,
+ "learning_rate": 8.292682926829268e-06,
+ "loss": 1.3341,
+ "step": 35
+ },
+ {
+ "epoch": 0.13324080499653018,
+ "grad_norm": 0.608441948890686,
+ "learning_rate": 8.536585365853658e-06,
+ "loss": 1.3191,
+ "step": 36
+ },
+ {
+ "epoch": 0.13694193846865602,
+ "grad_norm": 0.5608295798301697,
+ "learning_rate": 8.78048780487805e-06,
+ "loss": 1.2661,
+ "step": 37
+ },
+ {
+ "epoch": 0.14064307194078188,
+ "grad_norm": 0.524551272392273,
+ "learning_rate": 9.02439024390244e-06,
+ "loss": 1.2925,
+ "step": 38
+ },
+ {
279
+ "epoch": 0.1443442054129077,
280
+ "grad_norm": 0.5089970231056213,
281
+ "learning_rate": 9.268292682926831e-06,
282
+ "loss": 1.2661,
283
+ "step": 39
284
+ },
285
+ {
286
+ "epoch": 0.14804533888503354,
287
+ "grad_norm": 0.5608710050582886,
288
+ "learning_rate": 9.51219512195122e-06,
289
+ "loss": 1.3011,
290
+ "step": 40
291
+ },
292
+ {
293
+ "epoch": 0.15174647235715938,
294
+ "grad_norm": 0.5306675434112549,
295
+ "learning_rate": 9.756097560975611e-06,
296
+ "loss": 1.268,
297
+ "step": 41
298
+ },
299
+ {
300
+ "epoch": 0.1554476058292852,
301
+ "grad_norm": 0.5117089152336121,
302
+ "learning_rate": 1e-05,
303
+ "loss": 1.2685,
304
+ "step": 42
305
+ },
306
+ {
307
+ "epoch": 0.15914873930141105,
308
+ "grad_norm": 0.4487687647342682,
309
+ "learning_rate": 1.024390243902439e-05,
310
+ "loss": 1.2687,
311
+ "step": 43
312
+ },
313
+ {
314
+ "epoch": 0.1628498727735369,
315
+ "grad_norm": 0.46755310893058777,
316
+ "learning_rate": 1.0487804878048782e-05,
317
+ "loss": 1.2394,
318
+ "step": 44
319
+ },
320
+ {
321
+ "epoch": 0.16655100624566274,
322
+ "grad_norm": 0.42293983697891235,
323
+ "learning_rate": 1.0731707317073172e-05,
324
+ "loss": 1.2567,
325
+ "step": 45
326
+ },
327
+ {
328
+ "epoch": 0.17025213971778858,
329
+ "grad_norm": 0.4018270969390869,
330
+ "learning_rate": 1.0975609756097562e-05,
331
+ "loss": 1.2472,
332
+ "step": 46
333
+ },
334
+ {
335
+ "epoch": 0.1739532731899144,
336
+ "grad_norm": 0.435199499130249,
337
+ "learning_rate": 1.1219512195121953e-05,
338
+ "loss": 1.271,
339
+ "step": 47
340
+ },
341
+ {
342
+ "epoch": 0.17765440666204024,
343
+ "grad_norm": 0.43060699105262756,
344
+ "learning_rate": 1.1463414634146342e-05,
345
+ "loss": 1.271,
346
+ "step": 48
347
+ },
348
+ {
349
+ "epoch": 0.18135554013416608,
350
+ "grad_norm": 0.39703771471977234,
351
+ "learning_rate": 1.1707317073170731e-05,
352
+ "loss": 1.2637,
353
+ "step": 49
354
+ },
355
+ {
356
+ "epoch": 0.18505667360629194,
357
+ "grad_norm": 0.4147438704967499,
358
+ "learning_rate": 1.1951219512195123e-05,
359
+ "loss": 1.2618,
360
+ "step": 50
361
+ },
362
+ {
363
+ "epoch": 0.18875780707841777,
364
+ "grad_norm": 0.39236122369766235,
365
+ "learning_rate": 1.2195121951219513e-05,
366
+ "loss": 1.2319,
367
+ "step": 51
368
+ },
369
+ {
370
+ "epoch": 0.1924589405505436,
371
+ "grad_norm": 0.37801605463027954,
372
+ "learning_rate": 1.2439024390243903e-05,
373
+ "loss": 1.2331,
374
+ "step": 52
375
+ },
376
+ {
377
+ "epoch": 0.19616007402266944,
378
+ "grad_norm": 0.40901511907577515,
379
+ "learning_rate": 1.2682926829268294e-05,
380
+ "loss": 1.2287,
381
+ "step": 53
382
+ },
383
+ {
384
+ "epoch": 0.19986120749479527,
385
+ "grad_norm": 0.39218148589134216,
386
+ "learning_rate": 1.2926829268292684e-05,
387
+ "loss": 1.2329,
388
+ "step": 54
389
+ },
390
+ {
391
+ "epoch": 0.2035623409669211,
392
+ "grad_norm": 0.36160245537757874,
393
+ "learning_rate": 1.3170731707317076e-05,
394
+ "loss": 1.2418,
395
+ "step": 55
396
+ },
397
+ {
398
+ "epoch": 0.20726347443904697,
399
+ "grad_norm": 0.3971434235572815,
400
+ "learning_rate": 1.3414634146341466e-05,
401
+ "loss": 1.2105,
402
+ "step": 56
403
+ },
404
+ {
405
+ "epoch": 0.2109646079111728,
406
+ "grad_norm": 0.3966448903083801,
407
+ "learning_rate": 1.3658536585365855e-05,
408
+ "loss": 1.2237,
409
+ "step": 57
410
+ },
411
+ {
412
+ "epoch": 0.21466574138329864,
413
+ "grad_norm": 0.38691452145576477,
414
+ "learning_rate": 1.3902439024390244e-05,
415
+ "loss": 1.2103,
416
+ "step": 58
417
+ },
418
+ {
419
+ "epoch": 0.21836687485542447,
420
+ "grad_norm": 0.3971393406391144,
421
+ "learning_rate": 1.4146341463414635e-05,
422
+ "loss": 1.2076,
423
+ "step": 59
424
+ },
425
+ {
426
+ "epoch": 0.2220680083275503,
427
+ "grad_norm": 0.38985714316368103,
428
+ "learning_rate": 1.4390243902439025e-05,
429
+ "loss": 1.1594,
430
+ "step": 60
431
+ },
432
+ {
433
+ "epoch": 0.22576914179967614,
434
+ "grad_norm": 0.3863939642906189,
435
+ "learning_rate": 1.4634146341463415e-05,
436
+ "loss": 1.212,
437
+ "step": 61
438
+ },
439
+ {
440
+ "epoch": 0.229470275271802,
441
+ "grad_norm": 0.4149441719055176,
442
+ "learning_rate": 1.4878048780487806e-05,
443
+ "loss": 1.2397,
444
+ "step": 62
445
+ },
446
+ {
447
+ "epoch": 0.23317140874392783,
448
+ "grad_norm": 0.43418800830841064,
449
+ "learning_rate": 1.5121951219512196e-05,
450
+ "loss": 1.2596,
451
+ "step": 63
452
+ },
453
+ {
454
+ "epoch": 0.23687254221605367,
455
+ "grad_norm": 0.36480239033699036,
456
+ "learning_rate": 1.5365853658536586e-05,
457
+ "loss": 1.2457,
458
+ "step": 64
459
+ },
460
+ {
461
+ "epoch": 0.2405736756881795,
462
+ "grad_norm": 0.39555197954177856,
463
+ "learning_rate": 1.5609756097560978e-05,
464
+ "loss": 1.1896,
465
+ "step": 65
466
+ },
467
+ {
468
+ "epoch": 0.24427480916030533,
469
+ "grad_norm": 0.37620991468429565,
470
+ "learning_rate": 1.585365853658537e-05,
471
+ "loss": 1.2087,
472
+ "step": 66
473
+ },
474
+ {
475
+ "epoch": 0.2479759426324312,
476
+ "grad_norm": 0.3800138831138611,
477
+ "learning_rate": 1.6097560975609757e-05,
478
+ "loss": 1.2376,
479
+ "step": 67
480
+ },
481
+ {
482
+ "epoch": 0.25167707610455703,
483
+ "grad_norm": 0.42988261580467224,
484
+ "learning_rate": 1.6341463414634145e-05,
485
+ "loss": 1.2025,
486
+ "step": 68
487
+ },
488
+ {
489
+ "epoch": 0.25537820957668284,
490
+ "grad_norm": 0.37770283222198486,
491
+ "learning_rate": 1.6585365853658537e-05,
492
+ "loss": 1.2234,
493
+ "step": 69
494
+ },
495
+ {
496
+ "epoch": 0.2590793430488087,
497
+ "grad_norm": 0.47229480743408203,
498
+ "learning_rate": 1.682926829268293e-05,
499
+ "loss": 1.219,
500
+ "step": 70
501
+ },
502
+ {
503
+ "epoch": 0.26278047652093456,
504
+ "grad_norm": 0.4039227068424225,
505
+ "learning_rate": 1.7073170731707317e-05,
506
+ "loss": 1.1951,
507
+ "step": 71
508
+ },
509
+ {
510
+ "epoch": 0.26648160999306036,
511
+ "grad_norm": 0.45078909397125244,
512
+ "learning_rate": 1.7317073170731708e-05,
513
+ "loss": 1.1876,
514
+ "step": 72
515
+ },
516
+ {
517
+ "epoch": 0.2701827434651862,
518
+ "grad_norm": 0.41969749331474304,
519
+ "learning_rate": 1.75609756097561e-05,
520
+ "loss": 1.1933,
521
+ "step": 73
522
+ },
523
+ {
524
+ "epoch": 0.27388387693731203,
525
+ "grad_norm": 0.43968865275382996,
526
+ "learning_rate": 1.7804878048780488e-05,
527
+ "loss": 1.206,
528
+ "step": 74
529
+ },
530
+ {
531
+ "epoch": 0.2775850104094379,
532
+ "grad_norm": 0.4713888466358185,
533
+ "learning_rate": 1.804878048780488e-05,
534
+ "loss": 1.2222,
535
+ "step": 75
536
+ },
537
+ {
538
+ "epoch": 0.28128614388156375,
539
+ "grad_norm": 0.4152630865573883,
540
+ "learning_rate": 1.829268292682927e-05,
541
+ "loss": 1.1792,
542
+ "step": 76
543
+ },
544
+ {
545
+ "epoch": 0.28498727735368956,
546
+ "grad_norm": 0.43539732694625854,
547
+ "learning_rate": 1.8536585365853663e-05,
548
+ "loss": 1.1674,
549
+ "step": 77
550
+ },
551
+ {
552
+ "epoch": 0.2886884108258154,
553
+ "grad_norm": 0.427062451839447,
554
+ "learning_rate": 1.878048780487805e-05,
555
+ "loss": 1.1972,
556
+ "step": 78
557
+ },
558
+ {
559
+ "epoch": 0.29238954429794123,
560
+ "grad_norm": 0.4538004696369171,
561
+ "learning_rate": 1.902439024390244e-05,
562
+ "loss": 1.174,
563
+ "step": 79
564
+ },
565
+ {
566
+ "epoch": 0.2960906777700671,
567
+ "grad_norm": 0.4255438446998596,
568
+ "learning_rate": 1.926829268292683e-05,
569
+ "loss": 1.1903,
570
+ "step": 80
571
+ },
572
+ {
573
+ "epoch": 0.2997918112421929,
574
+ "grad_norm": 0.44163715839385986,
575
+ "learning_rate": 1.9512195121951222e-05,
576
+ "loss": 1.1961,
577
+ "step": 81
578
+ },
579
+ {
580
+ "epoch": 0.30349294471431876,
581
+ "grad_norm": 0.47783321142196655,
582
+ "learning_rate": 1.975609756097561e-05,
583
+ "loss": 1.1797,
584
+ "step": 82
585
+ },
586
+ {
587
+ "epoch": 0.3071940781864446,
588
+ "grad_norm": 0.524274468421936,
589
+ "learning_rate": 2e-05,
590
+ "loss": 1.2058,
591
+ "step": 83
592
+ },
593
+ {
594
+ "epoch": 0.3108952116585704,
595
+ "grad_norm": 0.43504446744918823,
596
+ "learning_rate": 1.9999907650547006e-05,
597
+ "loss": 1.1561,
598
+ "step": 84
599
+ },
600
+ {
601
+ "epoch": 0.3145963451306963,
602
+ "grad_norm": 0.5196582674980164,
603
+ "learning_rate": 1.999963060389371e-05,
604
+ "loss": 1.2008,
605
+ "step": 85
606
+ },
607
+ {
608
+ "epoch": 0.3182974786028221,
609
+ "grad_norm": 0.48404446244239807,
610
+ "learning_rate": 1.9999168865157137e-05,
611
+ "loss": 1.1837,
612
+ "step": 86
613
+ },
614
+ {
615
+ "epoch": 0.32199861207494795,
616
+ "grad_norm": 0.5402565598487854,
617
+ "learning_rate": 1.999852244286554e-05,
618
+ "loss": 1.1739,
619
+ "step": 87
620
+ },
621
+ {
622
+ "epoch": 0.3256997455470738,
623
+ "grad_norm": 0.45610666275024414,
624
+ "learning_rate": 1.9997691348958278e-05,
625
+ "loss": 1.2023,
626
+ "step": 88
627
+ },
628
+ {
629
+ "epoch": 0.3294008790191996,
630
+ "grad_norm": 0.5024977922439575,
631
+ "learning_rate": 1.999667559878556e-05,
632
+ "loss": 1.2208,
633
+ "step": 89
634
+ },
635
+ {
636
+ "epoch": 0.3331020124913255,
637
+ "grad_norm": 0.47227853536605835,
638
+ "learning_rate": 1.9995475211108183e-05,
639
+ "loss": 1.1853,
640
+ "step": 90
641
+ },
642
+ {
643
+ "epoch": 0.3368031459634513,
644
+ "grad_norm": 0.47440260648727417,
645
+ "learning_rate": 1.9994090208097176e-05,
646
+ "loss": 1.1841,
647
+ "step": 91
648
+ },
649
+ {
650
+ "epoch": 0.34050427943557715,
651
+ "grad_norm": 0.4835188090801239,
652
+ "learning_rate": 1.9992520615333393e-05,
653
+ "loss": 1.2107,
654
+ "step": 92
655
+ },
656
+ {
657
+ "epoch": 0.34420541290770296,
658
+ "grad_norm": 0.4680016338825226,
659
+ "learning_rate": 1.9990766461807037e-05,
660
+ "loss": 1.165,
661
+ "step": 93
662
+ },
663
+ {
664
+ "epoch": 0.3479065463798288,
665
+ "grad_norm": 0.4826832115650177,
666
+ "learning_rate": 1.9988827779917138e-05,
667
+ "loss": 1.153,
668
+ "step": 94
669
+ },
670
+ {
671
+ "epoch": 0.3516076798519547,
672
+ "grad_norm": 0.4597121775150299,
673
+ "learning_rate": 1.9986704605470932e-05,
674
+ "loss": 1.1786,
675
+ "step": 95
676
+ },
677
+ {
678
+ "epoch": 0.3553088133240805,
679
+ "grad_norm": 0.4732728898525238,
680
+ "learning_rate": 1.9984396977683223e-05,
681
+ "loss": 1.2066,
682
+ "step": 96
683
+ },
684
+ {
685
+ "epoch": 0.35900994679620635,
686
+ "grad_norm": 0.5414657592773438,
687
+ "learning_rate": 1.998190493917564e-05,
688
+ "loss": 1.1748,
689
+ "step": 97
690
+ },
691
+ {
692
+ "epoch": 0.36271108026833215,
693
+ "grad_norm": 0.46830272674560547,
694
+ "learning_rate": 1.9979228535975866e-05,
695
+ "loss": 1.159,
696
+ "step": 98
697
+ },
698
+ {
699
+ "epoch": 0.366412213740458,
700
+ "grad_norm": 0.4553397297859192,
701
+ "learning_rate": 1.9976367817516773e-05,
702
+ "loss": 1.1862,
703
+ "step": 99
704
+ },
705
+ {
706
+ "epoch": 0.3701133472125839,
707
+ "grad_norm": 0.4947619140148163,
708
+ "learning_rate": 1.9973322836635517e-05,
709
+ "loss": 1.172,
710
+ "step": 100
711
+ },
712
+ {
713
+ "epoch": 0.3738144806847097,
714
+ "grad_norm": 0.4160066246986389,
715
+ "learning_rate": 1.9970093649572567e-05,
716
+ "loss": 1.174,
717
+ "step": 101
718
+ },
719
+ {
720
+ "epoch": 0.37751561415683554,
721
+ "grad_norm": 0.44229212403297424,
722
+ "learning_rate": 1.9966680315970647e-05,
723
+ "loss": 1.1987,
724
+ "step": 102
725
+ },
726
+ {
727
+ "epoch": 0.38121674762896135,
728
+ "grad_norm": 0.4374721944332123,
729
+ "learning_rate": 1.996308289887366e-05,
730
+ "loss": 1.1773,
731
+ "step": 103
732
+ },
733
+ {
734
+ "epoch": 0.3849178811010872,
735
+ "grad_norm": 0.45597878098487854,
736
+ "learning_rate": 1.9959301464725507e-05,
737
+ "loss": 1.1696,
738
+ "step": 104
739
+ },
740
+ {
741
+ "epoch": 0.3886190145732131,
742
+ "grad_norm": 0.43197211623191833,
743
+ "learning_rate": 1.995533608336886e-05,
744
+ "loss": 1.1528,
745
+ "step": 105
746
+ },
747
+ {
748
+ "epoch": 0.3923201480453389,
749
+ "grad_norm": 0.4520713686943054,
750
+ "learning_rate": 1.995118682804388e-05,
751
+ "loss": 1.1522,
752
+ "step": 106
753
+ },
754
+ {
755
+ "epoch": 0.39602128151746474,
756
+ "grad_norm": 0.4522034525871277,
757
+ "learning_rate": 1.9946853775386857e-05,
758
+ "loss": 1.1657,
759
+ "step": 107
760
+ },
761
+ {
762
+ "epoch": 0.39972241498959055,
763
+ "grad_norm": 0.5125685930252075,
764
+ "learning_rate": 1.9942337005428805e-05,
765
+ "loss": 1.1875,
766
+ "step": 108
767
+ },
768
+ {
769
+ "epoch": 0.4034235484617164,
770
+ "grad_norm": 0.43560755252838135,
771
+ "learning_rate": 1.9937636601593965e-05,
772
+ "loss": 1.1501,
773
+ "step": 109
774
+ },
775
+ {
776
+ "epoch": 0.4071246819338422,
777
+ "grad_norm": 0.44878366589546204,
778
+ "learning_rate": 1.9932752650698285e-05,
779
+ "loss": 1.1649,
780
+ "step": 110
781
+ },
782
+ {
783
+ "epoch": 0.4108258154059681,
784
+ "grad_norm": 0.4889513850212097,
785
+ "learning_rate": 1.9927685242947804e-05,
786
+ "loss": 1.1736,
787
+ "step": 111
788
+ },
789
+ {
790
+ "epoch": 0.41452694887809394,
791
+ "grad_norm": 0.42187580466270447,
792
+ "learning_rate": 1.9922434471936987e-05,
793
+ "loss": 1.1463,
794
+ "step": 112
795
+ },
796
+ {
797
+ "epoch": 0.41822808235021974,
798
+ "grad_norm": 0.435621976852417,
799
+ "learning_rate": 1.9917000434647e-05,
800
+ "loss": 1.1582,
801
+ "step": 113
802
+ },
803
+ {
804
+ "epoch": 0.4219292158223456,
805
+ "grad_norm": 0.47740185260772705,
806
+ "learning_rate": 1.991138323144392e-05,
807
+ "loss": 1.1585,
808
+ "step": 114
809
+ },
810
+ {
811
+ "epoch": 0.4256303492944714,
812
+ "grad_norm": 0.44095510244369507,
813
+ "learning_rate": 1.990558296607687e-05,
814
+ "loss": 1.1787,
815
+ "step": 115
816
+ },
817
+ {
818
+ "epoch": 0.42933148276659727,
819
+ "grad_norm": 0.44858500361442566,
820
+ "learning_rate": 1.9899599745676123e-05,
821
+ "loss": 1.1428,
822
+ "step": 116
823
+ },
824
+ {
825
+ "epoch": 0.43303261623872313,
826
+ "grad_norm": 0.4960203766822815,
827
+ "learning_rate": 1.9893433680751105e-05,
828
+ "loss": 1.174,
829
+ "step": 117
830
+ },
831
+ {
832
+ "epoch": 0.43673374971084894,
833
+ "grad_norm": 0.45929455757141113,
834
+ "learning_rate": 1.9887084885188354e-05,
835
+ "loss": 1.1903,
836
+ "step": 118
837
+ },
838
+ {
839
+ "epoch": 0.4404348831829748,
840
+ "grad_norm": 0.4894520342350006,
841
+ "learning_rate": 1.9880553476249437e-05,
842
+ "loss": 1.1517,
843
+ "step": 119
844
+ },
845
+ {
846
+ "epoch": 0.4441360166551006,
847
+ "grad_norm": 0.4524790048599243,
848
+ "learning_rate": 1.9873839574568756e-05,
849
+ "loss": 1.1215,
850
+ "step": 120
851
+ },
852
+ {
853
+ "epoch": 0.44783715012722647,
854
+ "grad_norm": 0.446846067905426,
855
+ "learning_rate": 1.9866943304151346e-05,
856
+ "loss": 1.1471,
857
+ "step": 121
858
+ },
859
+ {
860
+ "epoch": 0.4515382835993523,
861
+ "grad_norm": 0.43665164709091187,
862
+ "learning_rate": 1.9859864792370565e-05,
863
+ "loss": 1.1369,
864
+ "step": 122
865
+ },
866
+ {
867
+ "epoch": 0.45523941707147814,
868
+ "grad_norm": 0.45532330870628357,
869
+ "learning_rate": 1.985260416996575e-05,
870
+ "loss": 1.1714,
871
+ "step": 123
872
+ },
873
+ {
874
+ "epoch": 0.458940550543604,
875
+ "grad_norm": 0.5525180101394653,
876
+ "learning_rate": 1.9845161571039805e-05,
877
+ "loss": 1.1394,
878
+ "step": 124
879
+ },
880
+ {
881
+ "epoch": 0.4626416840157298,
882
+ "grad_norm": 0.5021325349807739,
883
+ "learning_rate": 1.983753713305672e-05,
884
+ "loss": 1.1804,
885
+ "step": 125
886
+ },
887
+ {
888
+ "epoch": 0.46634281748785567,
889
+ "grad_norm": 0.45662885904312134,
890
+ "learning_rate": 1.982973099683902e-05,
891
+ "loss": 1.1423,
892
+ "step": 126
893
+ },
894
+ {
895
+ "epoch": 0.47004395095998147,
896
+ "grad_norm": 0.489346444606781,
897
+ "learning_rate": 1.98217433065652e-05,
898
+ "loss": 1.1544,
899
+ "step": 127
900
+ },
901
+ {
902
+ "epoch": 0.47374508443210733,
903
+ "grad_norm": 0.5123556852340698,
904
+ "learning_rate": 1.9813574209767013e-05,
905
+ "loss": 1.1704,
906
+ "step": 128
907
+ },
908
+ {
909
+ "epoch": 0.4774462179042332,
910
+ "grad_norm": 0.4498573839664459,
911
+ "learning_rate": 1.9805223857326794e-05,
912
+ "loss": 1.1672,
913
+ "step": 129
914
+ },
915
+ {
916
+ "epoch": 0.481147351376359,
917
+ "grad_norm": 0.5040275454521179,
918
+ "learning_rate": 1.9796692403474632e-05,
919
+ "loss": 1.1436,
920
+ "step": 130
921
+ },
922
+ {
923
+ "epoch": 0.48484848484848486,
924
+ "grad_norm": 0.48043787479400635,
925
+ "learning_rate": 1.9787980005785553e-05,
926
+ "loss": 1.1744,
927
+ "step": 131
928
+ },
929
+ {
930
+ "epoch": 0.48854961832061067,
931
+ "grad_norm": 0.5542130470275879,
932
+ "learning_rate": 1.977908682517658e-05,
933
+ "loss": 1.1403,
934
+ "step": 132
935
+ },
936
+ {
937
+ "epoch": 0.49225075179273653,
938
+ "grad_norm": 0.4555543065071106,
939
+ "learning_rate": 1.9770013025903797e-05,
940
+ "loss": 1.1437,
941
+ "step": 133
942
+ },
943
+ {
944
+ "epoch": 0.4959518852648624,
945
+ "grad_norm": 0.4997326135635376,
946
+ "learning_rate": 1.9760758775559275e-05,
947
+ "loss": 1.1239,
948
+ "step": 134
949
+ },
950
+ {
951
+ "epoch": 0.4996530187369882,
952
+ "grad_norm": 0.470058798789978,
953
+ "learning_rate": 1.9751324245068008e-05,
954
+ "loss": 1.1752,
955
+ "step": 135
956
+ },
957
+ {
958
+ "epoch": 0.5033541522091141,
959
+ "grad_norm": 0.47833555936813354,
960
+ "learning_rate": 1.974170960868474e-05,
961
+ "loss": 1.1652,
962
+ "step": 136
963
+ },
964
+ {
965
+ "epoch": 0.5070552856812399,
966
+ "grad_norm": 0.46740809082984924,
967
+ "learning_rate": 1.973191504399076e-05,
968
+ "loss": 1.1243,
969
+ "step": 137
970
+ },
971
+ {
972
+ "epoch": 0.5107564191533657,
973
+ "grad_norm": 0.4260287582874298,
974
+ "learning_rate": 1.97219407318906e-05,
975
+ "loss": 1.1457,
976
+ "step": 138
977
+ },
978
+ {
979
+ "epoch": 0.5144575526254915,
980
+ "grad_norm": 0.4864775538444519,
981
+ "learning_rate": 1.9711786856608714e-05,
982
+ "loss": 1.1405,
983
+ "step": 139
984
+ },
985
+ {
986
+ "epoch": 0.5181586860976174,
987
+ "grad_norm": 0.4291229844093323,
988
+ "learning_rate": 1.970145360568607e-05,
989
+ "loss": 1.1906,
990
+ "step": 140
991
+ },
992
+ {
993
+ "epoch": 0.5218598195697433,
994
+ "grad_norm": 0.5193659663200378,
995
+ "learning_rate": 1.969094116997668e-05,
996
+ "loss": 1.1328,
997
+ "step": 141
998
+ },
999
+ {
1000
+ "epoch": 0.5255609530418691,
1001
+ "grad_norm": 0.4072008430957794,
1002
+ "learning_rate": 1.968024974364408e-05,
1003
+ "loss": 1.1891,
1004
+ "step": 142
1005
+ },
1006
+ {
1007
+ "epoch": 0.5292620865139949,
1008
+ "grad_norm": 0.49957630038261414,
1009
+ "learning_rate": 1.9669379524157755e-05,
1010
+ "loss": 1.1667,
1011
+ "step": 143
1012
+ },
1013
+ {
1014
+ "epoch": 0.5329632199861207,
1015
+ "grad_norm": 0.46822240948677063,
1016
+ "learning_rate": 1.9658330712289456e-05,
1017
+ "loss": 1.1416,
1018
+ "step": 144
1019
+ },
1020
+ {
1021
+ "epoch": 0.5366643534582466,
1022
+ "grad_norm": 0.44118058681488037,
1023
+ "learning_rate": 1.9647103512109535e-05,
1024
+ "loss": 1.142,
1025
+ "step": 145
1026
+ },
1027
+ {
1028
+ "epoch": 0.5403654869303725,
1029
+ "grad_norm": 0.46672412753105164,
1030
+ "learning_rate": 1.9635698130983153e-05,
1031
+ "loss": 1.1702,
1032
+ "step": 146
1033
+ },
1034
+ {
1035
+ "epoch": 0.5440666204024983,
1036
+ "grad_norm": 0.4547111690044403,
1037
+ "learning_rate": 1.962411477956645e-05,
1038
+ "loss": 1.1495,
1039
+ "step": 147
1040
+ },
1041
+ {
1042
+ "epoch": 0.5477677538746241,
1043
+ "grad_norm": 0.4423418343067169,
1044
+ "learning_rate": 1.9612353671802658e-05,
1045
+ "loss": 1.128,
1046
+ "step": 148
1047
+ },
1048
+ {
1049
+ "epoch": 0.5514688873467499,
1050
+ "grad_norm": 0.49529922008514404,
1051
+ "learning_rate": 1.960041502491815e-05,
1052
+ "loss": 1.1488,
1053
+ "step": 149
1054
+ },
1055
+ {
1056
+ "epoch": 0.5551700208188758,
1057
+ "grad_norm": 0.4977218210697174,
1058
+ "learning_rate": 1.9588299059418434e-05,
1059
+ "loss": 1.1268,
1060
+ "step": 150
1061
+ },
1062
+ {
1063
+ "epoch": 0.5588711542910016,
1064
+ "grad_norm": 0.5749414563179016,
1065
+ "learning_rate": 1.957600599908406e-05,
1066
+ "loss": 1.1438,
1067
+ "step": 151
1068
+ },
1069
+ {
1070
+ "epoch": 0.5625722877631275,
1071
+ "grad_norm": 0.4806205630302429,
1072
+ "learning_rate": 1.9563536070966513e-05,
1073
+ "loss": 1.1278,
1074
+ "step": 152
1075
+ },
1076
+ {
1077
+ "epoch": 0.5662734212352533,
1078
+ "grad_norm": 0.5104363560676575,
1079
+ "learning_rate": 1.9550889505383996e-05,
1080
+ "loss": 1.1182,
1081
+ "step": 153
1082
+ },
1083
+ {
1084
+ "epoch": 0.5699745547073791,
1085
+ "grad_norm": 0.4537147879600525,
1086
+ "learning_rate": 1.9538066535917196e-05,
1087
+ "loss": 1.1617,
1088
+ "step": 154
1089
+ },
1090
+ {
1091
+ "epoch": 0.573675688179505,
1092
+ "grad_norm": 0.5351677536964417,
1093
+ "learning_rate": 1.952506739940496e-05,
1094
+ "loss": 1.1388,
1095
+ "step": 155
1096
+ },
1097
+ {
1098
+ "epoch": 0.5773768216516308,
1099
+ "grad_norm": 0.4276635944843292,
1100
+ "learning_rate": 1.9511892335939904e-05,
1101
+ "loss": 1.1267,
1102
+ "step": 156
1103
+ },
1104
+ {
1105
+ "epoch": 0.5810779551237566,
1106
+ "grad_norm": 0.5219061374664307,
1107
+ "learning_rate": 1.9498541588864022e-05,
1108
+ "loss": 1.1646,
1109
+ "step": 157
1110
+ },
1111
+ {
1112
+ "epoch": 0.5847790885958825,
1113
+ "grad_norm": 0.6277289986610413,
1114
+ "learning_rate": 1.948501540476414e-05,
1115
+ "loss": 1.1527,
1116
+ "step": 158
1117
+ },
1118
+ {
1119
+ "epoch": 0.5884802220680083,
1120
+ "grad_norm": 0.4811258614063263,
1121
+ "learning_rate": 1.9471314033467413e-05,
1122
+ "loss": 1.167,
1123
+ "step": 159
1124
+ },
1125
+ {
1126
+ "epoch": 0.5921813555401342,
1127
+ "grad_norm": 0.45152518153190613,
1128
+ "learning_rate": 1.945743772803666e-05,
1129
+ "loss": 1.1623,
1130
+ "step": 160
1131
+ },
1132
+ {
1133
+ "epoch": 0.59588248901226,
1134
+ "grad_norm": 0.4437843859195709,
1135
+ "learning_rate": 1.9443386744765726e-05,
1136
+ "loss": 1.1692,
1137
+ "step": 161
1138
+ },
1139
+ {
1140
+ "epoch": 0.5995836224843858,
1141
+ "grad_norm": 0.43210992217063904,
1142
+ "learning_rate": 1.942916134317473e-05,
1143
+ "loss": 1.1692,
1144
+ "step": 162
1145
+ },
1146
+ {
1147
+ "epoch": 0.6032847559565117,
1148
+ "grad_norm": 0.4339381456375122,
1149
+ "learning_rate": 1.9414761786005293e-05,
1150
+ "loss": 1.1394,
1151
+ "step": 163
1152
+ },
1153
+ {
1154
+ "epoch": 0.6069858894286375,
1155
+ "grad_norm": 0.44703447818756104,
1156
+ "learning_rate": 1.9400188339215657e-05,
1157
+ "loss": 1.1581,
1158
+ "step": 164
1159
+ },
1160
+ {
1161
+ "epoch": 0.6106870229007634,
1162
+ "grad_norm": 0.48046809434890747,
1163
+ "learning_rate": 1.9385441271975786e-05,
1164
+ "loss": 1.104,
1165
+ "step": 165
1166
+ },
1167
+ {
1168
+ "epoch": 0.6143881563728892,
1169
+ "grad_norm": 0.45704057812690735,
1170
+ "learning_rate": 1.9370520856662406e-05,
1171
+ "loss": 1.1412,
1172
+ "step": 166
1173
+ },
1174
+ {
1175
+ "epoch": 0.618089289845015,
1176
+ "grad_norm": 0.49565792083740234,
1177
+ "learning_rate": 1.9355427368853946e-05,
1178
+ "loss": 1.1873,
1179
+ "step": 167
1180
+ },
1181
+ {
1182
+ "epoch": 0.6217904233171409,
1183
+ "grad_norm": 0.49605411291122437,
1184
+ "learning_rate": 1.9340161087325483e-05,
1185
+ "loss": 1.1419,
1186
+ "step": 168
1187
+ },
1188
+ {
1189
+ "epoch": 0.6254915567892667,
1190
+ "grad_norm": 0.41736945509910583,
1191
+ "learning_rate": 1.932472229404356e-05,
1192
+ "loss": 1.1198,
1193
+ "step": 169
1194
+ },
1195
+ {
1196
+ "epoch": 0.6291926902613926,
1197
+ "grad_norm": 0.4974633753299713,
1198
+ "learning_rate": 1.9309111274161005e-05,
1199
+ "loss": 1.173,
1200
+ "step": 170
1201
+ },
1202
+ {
1203
+ "epoch": 0.6328938237335184,
1204
+ "grad_norm": 0.4752466678619385,
1205
+ "learning_rate": 1.9293328316011645e-05,
1206
+ "loss": 1.1442,
1207
+ "step": 171
1208
+ },
1209
+ {
1210
+ "epoch": 0.6365949572056442,
1211
+ "grad_norm": 0.44637349247932434,
1212
+ "learning_rate": 1.927737371110499e-05,
1213
+ "loss": 1.1357,
1214
+ "step": 172
1215
+ },
1216
+ {
1217
+ "epoch": 0.64029609067777,
1218
+ "grad_norm": 0.5463848114013672,
1219
+ "learning_rate": 1.9261247754120846e-05,
1220
+ "loss": 1.1143,
1221
+ "step": 173
1222
+ },
1223
+ {
1224
+ "epoch": 0.6439972241498959,
1225
+ "grad_norm": 0.43191689252853394,
1226
+ "learning_rate": 1.924495074290388e-05,
1227
+ "loss": 1.1357,
1228
+ "step": 174
1229
+ },
1230
+ {
1231
+ "epoch": 0.6476983576220218,
1232
+ "grad_norm": 0.5463783740997314,
1233
+ "learning_rate": 1.92284829784581e-05,
1234
+ "loss": 1.1117,
1235
+ "step": 175
1236
+ },
1237
+ {
1238
+ "epoch": 0.6513994910941476,
1239
+ "grad_norm": 0.5015990734100342,
1240
+ "learning_rate": 1.9211844764941318e-05,
1241
+ "loss": 1.1405,
1242
+ "step": 176
1243
+ },
1244
+ {
1245
+ "epoch": 0.6551006245662734,
1246
+ "grad_norm": 0.492009699344635,
1247
+ "learning_rate": 1.919503640965951e-05,
1248
+ "loss": 1.1621,
1249
+ "step": 177
1250
+ },
1251
+ {
1252
+ "epoch": 0.6588017580383992,
1253
+ "grad_norm": 0.529156506061554,
1254
+ "learning_rate": 1.917805822306117e-05,
1255
+ "loss": 1.1778,
1256
+ "step": 178
1257
+ },
1258
+ {
1259
+ "epoch": 0.6625028915105251,
1260
+ "grad_norm": 0.4173935353755951,
1261
+ "learning_rate": 1.916091051873154e-05,
1262
+ "loss": 1.1142,
1263
+ "step": 179
1264
+ },
1265
+ {
1266
+ "epoch": 0.666204024982651,
1267
+ "grad_norm": 0.4969713091850281,
1268
+ "learning_rate": 1.9143593613386845e-05,
1269
+ "loss": 1.1195,
1270
+ "step": 180
1271
+ },
1272
+ {
1273
+ "epoch": 0.6699051584547768,
1274
+ "grad_norm": 0.5533036589622498,
1275
+ "learning_rate": 1.9126107826868436e-05,
1276
+ "loss": 1.1436,
1277
+ "step": 181
1278
+ },
1279
+ {
1280
+ "epoch": 0.6736062919269026,
1281
+ "grad_norm": 0.5528081059455872,
1282
+ "learning_rate": 1.9108453482136866e-05,
1283
+ "loss": 1.1738,
1284
+ "step": 182
1285
+ },
1286
+ {
1287
+ "epoch": 0.6773074253990284,
1288
+ "grad_norm": 0.4741283357143402,
1289
+ "learning_rate": 1.9090630905265963e-05,
1290
+ "loss": 1.1766,
1291
+ "step": 183
1292
+ },
1293
+ {
1294
+ "epoch": 0.6810085588711543,
1295
+ "grad_norm": 0.5676066279411316,
1296
+ "learning_rate": 1.9072640425436762e-05,
1297
+ "loss": 1.1437,
1298
+ "step": 184
1299
+ },
1300
+ {
1301
+ "epoch": 0.6847096923432802,
1302
+ "grad_norm": 0.4501611292362213,
1303
+ "learning_rate": 1.905448237493147e-05,
1304
+ "loss": 1.1434,
1305
+ "step": 185
1306
+ },
1307
+ {
1308
+ "epoch": 0.6884108258154059,
1309
+ "grad_norm": 0.542989194393158,
1310
+ "learning_rate": 1.9036157089127278e-05,
1311
+ "loss": 1.1482,
1312
+ "step": 186
1313
+ },
1314
+ {
1315
+ "epoch": 0.6921119592875318,
1316
+ "grad_norm": 0.5076568126678467,
1317
+ "learning_rate": 1.901766490649022e-05,
1318
+ "loss": 1.1319,
1319
+ "step": 187
1320
+ },
1321
+ {
1322
+ "epoch": 0.6958130927596576,
1323
+ "grad_norm": 0.5173127055168152,
1324
+ "learning_rate": 1.8999006168568883e-05,
1325
+ "loss": 1.1485,
1326
+ "step": 188
1327
+ },
1328
+ {
1329
+ "epoch": 0.6995142262317835,
1330
+ "grad_norm": 0.517465353012085,
1331
+ "learning_rate": 1.8980181219988117e-05,
1332
+ "loss": 1.1349,
1333
+ "step": 189
1334
+ },
1335
+ {
1336
+ "epoch": 0.7032153597039094,
1337
+ "grad_norm": 0.4963802397251129,
1338
+ "learning_rate": 1.8961190408442662e-05,
1339
+ "loss": 1.1336,
1340
+ "step": 190
1341
+ },
1342
+ {
1343
+ "epoch": 0.7069164931760351,
1344
+ "grad_norm": 0.4494330883026123,
1345
+ "learning_rate": 1.8942034084690727e-05,
1346
+ "loss": 1.1215,
1347
+ "step": 191
1348
+ },
1349
+ {
1350
+ "epoch": 0.710617626648161,
1351
+ "grad_norm": 0.49428790807724,
1352
+ "learning_rate": 1.8922712602547516e-05,
1353
+ "loss": 1.1203,
1354
+ "step": 192
1355
+ },
1356
+ {
1357
+ "epoch": 0.7143187601202868,
1358
+ "grad_norm": 0.43069377541542053,
1359
+ "learning_rate": 1.89032263188787e-05,
1360
+ "loss": 1.1058,
1361
+ "step": 193
1362
+ },
1363
+ {
1364
+ "epoch": 0.7180198935924127,
1365
+ "grad_norm": 0.4609989523887634,
1366
+ "learning_rate": 1.8883575593593793e-05,
1367
+ "loss": 1.112,
1368
+ "step": 194
1369
+ },
1370
+ {
1371
+ "epoch": 0.7217210270645386,
1372
+ "grad_norm": 0.43700459599494934,
1373
+ "learning_rate": 1.8863760789639548e-05,
1374
+ "loss": 1.1566,
1375
+ "step": 195
1376
+ },
1377
+ {
1378
+ "epoch": 0.7254221605366643,
1379
+ "grad_norm": 0.480736643075943,
1380
+ "learning_rate": 1.8843782272993225e-05,
1381
+ "loss": 1.1499,
1382
+ "step": 196
1383
+ },
1384
+ {
1385
+ "epoch": 0.7291232940087902,
1386
+ "grad_norm": 0.4634736180305481,
1387
+ "learning_rate": 1.8823640412655844e-05,
1388
+ "loss": 1.0988,
1389
+ "step": 197
1390
+ },
1391
+ {
1392
+ "epoch": 0.732824427480916,
1393
+ "grad_norm": 0.4300520420074463,
1394
+ "learning_rate": 1.880333558064536e-05,
1395
+ "loss": 1.1141,
1396
+ "step": 198
1397
+ },
1398
+ {
1399
+ "epoch": 0.7365255609530419,
1400
+ "grad_norm": 0.4690033793449402,
1401
+ "learning_rate": 1.878286815198979e-05,
1402
+ "loss": 1.1454,
1403
+ "step": 199
1404
+ },
1405
+ {
1406
+ "epoch": 0.7402266944251678,
1407
+ "grad_norm": 0.40673312544822693,
1408
+ "learning_rate": 1.876223850472032e-05,
1409
+ "loss": 1.1273,
1410
+ "step": 200
1411
+ },
1412
+ {
1413
+ "epoch": 0.7439278278972935,
1414
+ "grad_norm": 0.4603167474269867,
1415
+ "learning_rate": 1.8741447019864263e-05,
1416
+ "loss": 1.1334,
1417
+ "step": 201
1418
+ },
1419
+ {
1420
+ "epoch": 0.7476289613694194,
1421
+ "grad_norm": 0.4728105068206787,
1422
+ "learning_rate": 1.872049408143808e-05,
1423
+ "loss": 1.1185,
1424
+ "step": 202
1425
+ },
1426
+ {
1427
+ "epoch": 0.7513300948415452,
1428
+ "grad_norm": 0.47324028611183167,
1429
+ "learning_rate": 1.8699380076440242e-05,
1430
+ "loss": 1.1362,
1431
+ "step": 203
1432
+ },
1433
+ {
1434
+ "epoch": 0.7550312283136711,
1435
+ "grad_norm": 0.4205387532711029,
1436
+ "learning_rate": 1.8678105394844114e-05,
1437
+ "loss": 1.1291,
1438
+ "step": 204
1439
+ },
1440
+ {
1441
+ "epoch": 0.758732361785797,
1442
+ "grad_norm": 0.4776360094547272,
1443
+ "learning_rate": 1.8656670429590745e-05,
1444
+ "loss": 1.1372,
1445
+ "step": 205
1446
+ },
1447
+ {
1448
+ "epoch": 0.7624334952579227,
1449
+ "grad_norm": 0.4702264964580536,
1450
+ "learning_rate": 1.8635075576581587e-05,
1451
+ "loss": 1.1183,
1452
+ "step": 206
1453
+ },
1454
+ {
1455
+ "epoch": 0.7661346287300486,
1456
+ "grad_norm": 0.39752131700515747,
1457
+ "learning_rate": 1.861332123467122e-05,
1458
+ "loss": 1.1463,
1459
+ "step": 207
1460
+ },
1461
+ {
1462
+ "epoch": 0.7698357622021744,
1463
+ "grad_norm": 0.5406947731971741,
1464
+ "learning_rate": 1.859140780565996e-05,
1465
+ "loss": 1.1443,
1466
+ "step": 208
1467
+ },
1468
+ {
1469
+ "epoch": 0.7735368956743003,
1470
+ "grad_norm": 0.3916800320148468,
1471
+ "learning_rate": 1.856933569428644e-05,
1472
+ "loss": 1.1318,
1473
+ "step": 209
1474
+ },
1475
+ {
1476
+ "epoch": 0.7772380291464261,
1477
+ "grad_norm": 0.4341064989566803,
1478
+ "learning_rate": 1.8547105308220142e-05,
1479
+ "loss": 1.119,
1480
+ "step": 210
1481
+ },
1482
+ {
1483
+ "epoch": 0.7809391626185519,
1484
+ "grad_norm": 0.4670443534851074,
1485
+ "learning_rate": 1.852471705805387e-05,
1486
+ "loss": 1.1254,
1487
+ "step": 211
1488
+ },
1489
+ {
1490
+ "epoch": 0.7846402960906778,
1491
+ "grad_norm": 0.4435541331768036,
1492
+ "learning_rate": 1.8502171357296144e-05,
1493
+ "loss": 1.1543,
1494
+ "step": 212
1495
+ },
1496
+ {
1497
+ "epoch": 0.7883414295628036,
1498
+ "grad_norm": 0.4735044836997986,
1499
+ "learning_rate": 1.84794686223636e-05,
1500
+ "loss": 1.1309,
1501
+ "step": 213
1502
+ },
1503
+ {
1504
+ "epoch": 0.7920425630349295,
1505
+ "grad_norm": 0.49449247121810913,
1506
+ "learning_rate": 1.8456609272573268e-05,
1507
+ "loss": 1.1602,
1508
+ "step": 214
1509
+ },
1510
+ {
1511
+ "epoch": 0.7957436965070552,
1512
+ "grad_norm": 0.46075525879859924,
1513
+ "learning_rate": 1.8433593730134835e-05,
1514
+ "loss": 1.1084,
1515
+ "step": 215
1516
+ },
1517
+ {
1518
+ "epoch": 0.7994448299791811,
1519
+ "grad_norm": 0.49750804901123047,
1520
+ "learning_rate": 1.841042242014285e-05,
1521
+ "loss": 1.1639,
1522
+ "step": 216
1523
+ },
1524
+ {
1525
+ "epoch": 0.803145963451307,
1526
+ "grad_norm": 0.46192413568496704,
1527
+ "learning_rate": 1.838709577056888e-05,
1528
+ "loss": 1.1041,
1529
+ "step": 217
1530
+ },
1531
+ {
1532
+ "epoch": 0.8068470969234328,
1533
+ "grad_norm": 0.47443869709968567,
1534
+ "learning_rate": 1.8363614212253585e-05,
1535
+ "loss": 1.1089,
1536
+ "step": 218
1537
+ },
1538
+ {
1539
+ "epoch": 0.8105482303955587,
1540
+ "grad_norm": 0.4756864309310913,
1541
+ "learning_rate": 1.833997817889878e-05,
1542
+ "loss": 1.105,
1543
+ "step": 219
1544
+ },
1545
+ {
1546
+ "epoch": 0.8142493638676844,
1547
+ "grad_norm": 0.43266934156417847,
1548
+ "learning_rate": 1.8316188107059418e-05,
1549
+ "loss": 1.1341,
1550
+ "step": 220
1551
+ },
1552
+ {
1553
+ "epoch": 0.8179504973398103,
1554
+ "grad_norm": 0.504323422908783,
1555
+ "learning_rate": 1.8292244436135517e-05,
1556
+ "loss": 1.1365,
1557
+ "step": 221
1558
+ },
1559
+ {
1560
+ "epoch": 0.8216516308119362,
1561
+ "grad_norm": 0.4178315997123718,
1562
+ "learning_rate": 1.8268147608364068e-05,
1563
+ "loss": 1.126,
1564
+ "step": 222
1565
+ },
1566
+ {
1567
+ "epoch": 0.825352764284062,
1568
+ "grad_norm": 0.4512770175933838,
1569
+ "learning_rate": 1.8243898068810833e-05,
1570
+ "loss": 1.0825,
1571
+ "step": 223
1572
+ },
1573
+ {
1574
+ "epoch": 0.8290538977561879,
1575
+ "grad_norm": 0.4697542190551758,
1576
+ "learning_rate": 1.8219496265362164e-05,
1577
+ "loss": 1.0932,
1578
+ "step": 224
1579
+ },
1580
+ {
1581
+ "epoch": 0.8327550312283136,
1582
+ "grad_norm": 0.4374232590198517,
1583
+ "learning_rate": 1.81949426487167e-05,
1584
+ "loss": 1.1341,
1585
+ "step": 225
1586
+ },
1587
+ {
1588
+ "epoch": 0.8364561647004395,
1589
+ "grad_norm": 0.41643136739730835,
1590
+ "learning_rate": 1.8170237672377046e-05,
1591
+ "loss": 1.1146,
1592
+ "step": 226
1593
+ },
1594
+ {
1595
+ "epoch": 0.8401572981725653,
1596
+ "grad_norm": 0.45138299465179443,
1597
+ "learning_rate": 1.814538179264142e-05,
1598
+ "loss": 1.1147,
1599
+ "step": 227
1600
+ },
1601
+ {
1602
+ "epoch": 0.8438584316446912,
1603
+ "grad_norm": 0.4233904778957367,
1604
+ "learning_rate": 1.81203754685952e-05,
1605
+ "loss": 1.1297,
1606
+ "step": 228
1607
+ },
1608
+ {
1609
+ "epoch": 0.8475595651168171,
1610
+ "grad_norm": 0.5106139183044434,
1611
+ "learning_rate": 1.8095219162102453e-05,
1612
+ "loss": 1.1147,
1613
+ "step": 229
1614
+ },
1615
+ {
1616
+ "epoch": 0.8512606985889428,
1617
+ "grad_norm": 0.44398224353790283,
1618
+ "learning_rate": 1.8069913337797414e-05,
1619
+ "loss": 1.1274,
1620
+ "step": 230
1621
+ },
1622
+ {
1623
+ "epoch": 0.8549618320610687,
1624
+ "grad_norm": 0.506275475025177,
1625
+ "learning_rate": 1.804445846307588e-05,
1626
+ "loss": 1.1383,
1627
+ "step": 231
1628
+ },
1629
+ {
1630
+ "epoch": 0.8586629655331945,
1631
+ "grad_norm": 0.46077436208724976,
1632
+ "learning_rate": 1.801885500808661e-05,
1633
+ "loss": 1.0964,
1634
+ "step": 232
1635
+ },
1636
+ {
1637
+ "epoch": 0.8623640990053204,
1638
+ "grad_norm": 0.5077394843101501,
1639
+ "learning_rate": 1.7993103445722615e-05,
1640
+ "loss": 1.131,
1641
+ "step": 233
1642
+ },
1643
+ {
1644
+ "epoch": 0.8660652324774463,
1645
+ "grad_norm": 0.48133185505867004,
1646
+ "learning_rate": 1.7967204251612432e-05,
1647
+ "loss": 1.1366,
1648
+ "step": 234
1649
+ },
1650
+ {
1651
+ "epoch": 0.869766365949572,
1652
+ "grad_norm": 0.4848846197128296,
1653
+ "learning_rate": 1.7941157904111346e-05,
1654
+ "loss": 1.1426,
1655
+ "step": 235
1656
+ },
1657
+ {
1658
+ "epoch": 0.8734674994216979,
1659
+ "grad_norm": 0.5075129866600037,
1660
+ "learning_rate": 1.7914964884292543e-05,
1661
+ "loss": 1.1,
1662
+ "step": 236
1663
+ },
1664
+ {
1665
+ "epoch": 0.8771686328938237,
1666
+ "grad_norm": 0.43443480134010315,
1667
+ "learning_rate": 1.7888625675938237e-05,
1668
+ "loss": 1.1435,
1669
+ "step": 237
1670
+ },
1671
+ {
1672
+ "epoch": 0.8808697663659496,
1673
+ "grad_norm": 0.567414402961731,
1674
+ "learning_rate": 1.7862140765530718e-05,
1675
+ "loss": 1.0904,
1676
+ "step": 238
1677
+ },
1678
+ {
1679
+ "epoch": 0.8845708998380755,
1680
+ "grad_norm": 0.5032252073287964,
1681
+ "learning_rate": 1.783551064224339e-05,
1682
+ "loss": 1.1429,
1683
+ "step": 239
1684
+ },
1685
+ {
1686
+ "epoch": 0.8882720333102012,
1687
+ "grad_norm": 0.461820513010025,
1688
+ "learning_rate": 1.7808735797931715e-05,
1689
+ "loss": 1.1108,
1690
+ "step": 240
1691
+ },
1692
+ {
1693
+ "epoch": 0.8919731667823271,
1694
+ "grad_norm": 0.4861697852611542,
1695
+ "learning_rate": 1.7781816727124138e-05,
1696
+ "loss": 1.0938,
1697
+ "step": 241
1698
+ },
1699
+ {
1700
+ "epoch": 0.8956743002544529,
1701
+ "grad_norm": 0.5366714000701904,
1702
+ "learning_rate": 1.7754753927012955e-05,
1703
+ "loss": 1.1144,
1704
+ "step": 242
1705
+ },
1706
+ {
1707
+ "epoch": 0.8993754337265788,
1708
+ "grad_norm": 0.4563060700893402,
1709
+ "learning_rate": 1.7727547897445117e-05,
1710
+ "loss": 1.1164,
1711
+ "step": 243
1712
+ },
1713
+ {
1714
+ "epoch": 0.9030765671987045,
1715
+ "grad_norm": 0.5184001922607422,
1716
+ "learning_rate": 1.770019914091302e-05,
1717
+ "loss": 1.1357,
1718
+ "step": 244
1719
+ },
1720
+ {
1721
+ "epoch": 0.9067777006708304,
1722
+ "grad_norm": 0.44209641218185425,
1723
+ "learning_rate": 1.76727081625452e-05,
1724
+ "loss": 1.1145,
1725
+ "step": 245
1726
+ },
1727
+ {
1728
+ "epoch": 0.9104788341429563,
1729
+ "grad_norm": 0.5243192315101624,
1730
+ "learning_rate": 1.7645075470097024e-05,
1731
+ "loss": 1.1238,
1732
+ "step": 246
1733
+ },
1734
+ {
1735
+ "epoch": 0.9141799676150821,
1736
+ "grad_norm": 0.4946383237838745,
1737
+ "learning_rate": 1.7617301573941296e-05,
1738
+ "loss": 1.1248,
1739
+ "step": 247
1740
+ },
1741
+ {
1742
+ "epoch": 0.917881101087208,
1743
+ "grad_norm": 0.4805106520652771,
1744
+ "learning_rate": 1.758938698705884e-05,
1745
+ "loss": 1.1303,
1746
+ "step": 248
1747
+ },
1748
+ {
1749
+ "epoch": 0.9215822345593337,
1750
+ "grad_norm": 0.44972893595695496,
1751
+ "learning_rate": 1.7561332225029022e-05,
1752
+ "loss": 1.1356,
1753
+ "step": 249
1754
+ },
1755
+ {
1756
+ "epoch": 0.9252833680314596,
1757
+ "grad_norm": 0.45467665791511536,
1758
+ "learning_rate": 1.7533137806020226e-05,
1759
+ "loss": 1.0766,
1760
+ "step": 250
1761
+ },
1762
+ {
1763
+ "epoch": 0.9289845015035855,
1764
+ "grad_norm": 0.5250957608222961,
1765
+ "learning_rate": 1.7504804250780292e-05,
1766
+ "loss": 1.1021,
1767
+ "step": 251
1768
+ },
1769
+ {
1770
+ "epoch": 0.9326856349757113,
1771
+ "grad_norm": 0.3975278437137604,
1772
+ "learning_rate": 1.747633208262688e-05,
1773
+ "loss": 1.115,
1774
+ "step": 252
1775
+ },
1776
+ {
1777
+ "epoch": 0.9363867684478372,
1778
+ "grad_norm": 0.529544472694397,
1779
+ "learning_rate": 1.744772182743782e-05,
1780
+ "loss": 1.1115,
1781
+ "step": 253
1782
+ },
1783
+ {
1784
+ "epoch": 0.9400879019199629,
1785
+ "grad_norm": 0.460066556930542,
1786
+ "learning_rate": 1.74189740136414e-05,
1787
+ "loss": 1.1012,
1788
+ "step": 254
1789
+ },
1790
+ {
1791
+ "epoch": 0.9437890353920888,
1792
+ "grad_norm": 0.4442214071750641,
1793
+ "learning_rate": 1.7390089172206594e-05,
1794
+ "loss": 1.1247,
1795
+ "step": 255
1796
+ },
1797
+ {
1798
+ "epoch": 0.9474901688642147,
1799
+ "grad_norm": 0.5348288416862488,
1800
+ "learning_rate": 1.736106783663326e-05,
1801
+ "loss": 1.0952,
1802
+ "step": 256
1803
+ },
1804
+ {
1805
+ "epoch": 0.9511913023363405,
1806
+ "grad_norm": 0.3909313678741455,
1807
+ "learning_rate": 1.7331910542942298e-05,
1808
+ "loss": 1.1331,
1809
+ "step": 257
1810
+ },
1811
+ {
1812
+ "epoch": 0.9548924358084664,
1813
+ "grad_norm": 0.49731630086898804,
1814
+ "learning_rate": 1.7302617829665725e-05,
1815
+ "loss": 1.1077,
1816
+ "step": 258
1817
+ },
1818
+ {
1819
+ "epoch": 0.9585935692805921,
1820
+ "grad_norm": 0.4206913709640503,
1821
+ "learning_rate": 1.7273190237836757e-05,
1822
+ "loss": 1.1321,
1823
+ "step": 259
1824
+ },
1825
+ {
1826
+ "epoch": 0.962294702752718,
1827
+ "grad_norm": 0.5688621401786804,
1828
+ "learning_rate": 1.7243628310979793e-05,
1829
+ "loss": 1.1318,
1830
+ "step": 260
1831
+ },
1832
+ {
1833
+ "epoch": 0.9659958362248439,
1834
+ "grad_norm": 0.4344209134578705,
1835
+ "learning_rate": 1.7213932595100384e-05,
1836
+ "loss": 1.1188,
1837
+ "step": 261
1838
+ },
1839
+ {
1840
+ "epoch": 0.9696969696969697,
1841
+ "grad_norm": 0.482854425907135,
1842
+ "learning_rate": 1.7184103638675157e-05,
1843
+ "loss": 1.116,
1844
+ "step": 262
1845
+ },
1846
+ {
1847
+ "epoch": 0.9733981031690956,
1848
+ "grad_norm": 0.4521266520023346,
1849
+ "learning_rate": 1.715414199264168e-05,
1850
+ "loss": 1.0995,
1851
+ "step": 263
1852
+ },
1853
+ {
1854
+ "epoch": 0.9770992366412213,
1855
+ "grad_norm": 0.47049805521965027,
1856
+ "learning_rate": 1.7124048210388268e-05,
1857
+ "loss": 1.1207,
1858
+ "step": 264
1859
+ },
1860
+ {
1861
+ "epoch": 0.9808003701133472,
1862
+ "grad_norm": 0.3913280963897705,
1863
+ "learning_rate": 1.709382284774379e-05,
1864
+ "loss": 1.1493,
1865
+ "step": 265
1866
+ },
1867
+ {
1868
+ "epoch": 0.9845015035854731,
1869
+ "grad_norm": 0.3968203067779541,
1870
+ "learning_rate": 1.706346646296739e-05,
1871
+ "loss": 1.1241,
1872
+ "step": 266
1873
+ },
1874
+ {
1875
+ "epoch": 0.9882026370575989,
1876
+ "grad_norm": 0.46026578545570374,
1877
+ "learning_rate": 1.7032979616738167e-05,
1878
+ "loss": 1.1181,
1879
+ "step": 267
1880
+ },
1881
+ {
1882
+ "epoch": 0.9919037705297248,
1883
+ "grad_norm": 0.3910259008407593,
1884
+ "learning_rate": 1.7002362872144843e-05,
1885
+ "loss": 1.1082,
1886
+ "step": 268
1887
+ },
1888
+ {
1889
+ "epoch": 0.9956049040018505,
1890
+ "grad_norm": 0.40289193391799927,
1891
+ "learning_rate": 1.697161679467534e-05,
1892
+ "loss": 1.1013,
1893
+ "step": 269
1894
+ },
1895
+ {
1896
+ "epoch": 0.9993060374739764,
1897
+ "grad_norm": 0.4233797788619995,
1898
+ "learning_rate": 1.6940741952206342e-05,
1899
+ "loss": 1.1123,
1900
+ "step": 270
1901
+ },
1902
+ {
1903
+ "epoch": 1.0,
1904
+ "grad_norm": 0.4233797788619995,
1905
+ "learning_rate": 1.6909738914992812e-05,
1906
+ "loss": 1.1457,
1907
+ "step": 271
1908
+ }
1909
+ ],
1910
+ "logging_steps": 1,
1911
+ "max_steps": 813,
1912
+ "num_input_tokens_seen": 0,
1913
+ "num_train_epochs": 3,
1914
+ "save_steps": 500,
1915
+ "stateful_callbacks": {
1916
+ "TrainerControl": {
1917
+ "args": {
1918
+ "should_epoch_stop": false,
1919
+ "should_evaluate": false,
1920
+ "should_log": false,
1921
+ "should_save": true,
1922
+ "should_training_stop": false
1923
+ },
1924
+ "attributes": {}
1925
+ }
1926
+ },
1927
+ "total_flos": 333261142228992.0,
1928
+ "train_batch_size": 1,
1929
+ "trial_name": null,
1930
+ "trial_params": null
1931
+ }
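The JSON above is the standard Hugging Face Trainer state at this checkpoint: log_history holds one record per step (logging_steps is 1), each with epoch, grad_norm, learning_rate, loss, and step; the learning rate warms up linearly to 2e-05 by step 83 and then decays over the scheduled 813 steps. A minimal Python sketch for loading and summarizing the log, assuming it is run inside the checkpoint directory (only the file shown above is used):

import json

# Load the Trainer state shown above and summarize the logged history.
with open("trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]
losses = [r["loss"] for r in history]
lrs = [r["learning_rate"] for r in history]

print(f"logged steps: {len(history)} (global_step={state['global_step']})")
print(f"loss: {losses[0]:.4f} -> {losses[-1]:.4f}")
print(f"peak learning rate: {max(lrs):.2e}")
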
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e43d868fe1564fb914d2dd355a72fcee2b77979729717f01381d32ca71a38ea3
3
+ size 7672
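training_args.bin is stored as a Git LFS pointer (spec version, sha256 oid, and byte size) rather than as inline content. In Transformers checkpoints this file is the TrainingArguments object serialized with torch.save, so it can be inspected with a sketch like the following (assumes a compatible transformers install; weights_only=False is needed on recent PyTorch because the file is a full pickle, and the printed attribute names are standard TrainingArguments fields):

import torch

# Sketch: training_args.bin is a pickled TrainingArguments object,
# so it must be loaded with weights_only=False on recent PyTorch.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
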
vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
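vocab.json is too large for the diff view, but it is not normally read by hand; it is consumed through the tokenizer API together with the other tokenizer files uploaded in this commit. A minimal sketch, assuming the repository has been cloned locally as a complete checkpoint:

from transformers import AutoTokenizer

# Sketch: load the tokenizer shipped with this checkpoint (vocab.json and friends).
tok = AutoTokenizer.from_pretrained(".")
print(len(tok))  # vocabulary size, including added special tokens
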
zero_to_fp32.py ADDED
@@ -0,0 +1,674 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example:
14
+ # python zero_to_fp32.py . output_dir/
15
+ # or
16
+ # python zero_to_fp32.py . output_dir/ --safe_serialization
17
+
18
+ import argparse
19
+ import torch
20
+ import glob
21
+ import math
22
+ import os
23
+ import re
24
+ import json
25
+ from tqdm import tqdm
26
+ from collections import OrderedDict
27
+ from dataclasses import dataclass
28
+
29
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
30
+ # DeepSpeed data structures it has to be available in the current python environment.
31
+ from deepspeed.utils import logger
32
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
33
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
34
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
35
+
36
+
37
+ @dataclass
38
+ class zero_model_state:
39
+ buffers: dict()
40
+ param_shapes: dict()
41
+ shared_params: list
42
+ ds_version: int
43
+ frozen_param_shapes: dict()
44
+ frozen_param_fragments: dict()
45
+
46
+
47
+ debug = 0
48
+
49
+ # load to cpu
50
+ device = torch.device('cpu')
51
+
52
+
53
+ def atoi(text):
54
+ return int(text) if text.isdigit() else text
55
+
56
+
57
+ def natural_keys(text):
58
+ '''
59
+ alist.sort(key=natural_keys) sorts in human order
60
+ http://nedbatchelder.com/blog/200712/human_sorting.html
61
+ (See Toothy's implementation in the comments)
62
+ '''
63
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
64
+
65
+
66
+ def get_model_state_file(checkpoint_dir, zero_stage):
67
+ if not os.path.isdir(checkpoint_dir):
68
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
69
+
70
+ # there should be only one file
71
+ if zero_stage <= 2:
72
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
73
+ elif zero_stage == 3:
74
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
75
+
76
+ if not os.path.exists(file):
77
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
78
+
79
+ return file
80
+
81
+
82
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
83
+ # XXX: need to test that this simple glob rule works for multi-node setup too
84
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
85
+
86
+ if len(ckpt_files) == 0:
87
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
88
+
89
+ return ckpt_files
90
+
91
+
92
+ def get_optim_files(checkpoint_dir):
93
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
94
+
95
+
96
+ def get_model_state_files(checkpoint_dir):
97
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
98
+
99
+
100
+ def parse_model_states(files):
101
+ zero_model_states = []
102
+ for file in files:
103
+ state_dict = torch.load(file, map_location=device)
104
+
105
+ if BUFFER_NAMES not in state_dict:
106
+ raise ValueError(f"{file} is not a model state checkpoint")
107
+ buffer_names = state_dict[BUFFER_NAMES]
108
+ if debug:
109
+ print("Found buffers:", buffer_names)
110
+
111
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
112
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
113
+ param_shapes = state_dict[PARAM_SHAPES]
114
+
115
+ # collect parameters that are included in param_shapes
116
+ param_names = []
117
+ for s in param_shapes:
118
+ for name in s.keys():
119
+ param_names.append(name)
120
+
121
+ # update with frozen parameters
122
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
123
+ if frozen_param_shapes is not None:
124
+ if debug:
125
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
126
+ param_names += list(frozen_param_shapes.keys())
127
+
128
+ # handle shared params
129
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
130
+
131
+ ds_version = state_dict.get(DS_VERSION, None)
132
+
133
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
134
+
135
+ z_model_state = zero_model_state(buffers=buffers,
136
+ param_shapes=param_shapes,
137
+ shared_params=shared_params,
138
+ ds_version=ds_version,
139
+ frozen_param_shapes=frozen_param_shapes,
140
+ frozen_param_fragments=frozen_param_fragments)
141
+ zero_model_states.append(z_model_state)
142
+
143
+ return zero_model_states
144
+
145
+
146
+ def parse_optim_states(files, ds_checkpoint_dir):
147
+ total_files = len(files)
148
+ state_dicts = []
149
+ for f in files:
150
+ state_dict = torch.load(f, map_location=device)
151
+ # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
152
+ # and also handle the case where it was already removed by another helper script
153
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
154
+ state_dicts.append(state_dict)
155
+
156
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
157
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
158
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
159
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
160
+
161
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
162
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
163
+ # use the max of the partition_count to get the dp world_size.
164
+
165
+ if type(world_size) is list:
166
+ world_size = max(world_size)
167
+
168
+ if world_size != total_files:
169
+ raise ValueError(
170
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
171
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
172
+ )
173
+
174
+ # the groups are named differently in each stage
175
+ if zero_stage <= 2:
176
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
177
+ elif zero_stage == 3:
178
+ fp32_groups_key = FP32_FLAT_GROUPS
179
+ else:
180
+ raise ValueError(f"unknown zero stage {zero_stage}")
181
+
182
+ if zero_stage <= 2:
183
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
184
+ elif zero_stage == 3:
185
+ # if there is more than one param group, there will be multiple flattened tensors - one
186
+ # flattened tensor per group - for simplicity merge them into a single tensor
187
+ #
188
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
189
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
190
+
191
+ fp32_flat_groups = [
192
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
193
+ ]
194
+
195
+ return zero_stage, world_size, fp32_flat_groups
196
+
197
+
198
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
199
+ """
200
+ Returns fp32 state_dict reconstructed from ds checkpoint
201
+
202
+ Args:
203
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
204
+
205
+ """
206
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
207
+
208
+ optim_files = get_optim_files(ds_checkpoint_dir)
209
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
210
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
211
+
212
+ model_files = get_model_state_files(ds_checkpoint_dir)
213
+
214
+ zero_model_states = parse_model_states(model_files)
215
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
216
+
217
+ if zero_stage <= 2:
218
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
219
+ exclude_frozen_parameters)
220
+ elif zero_stage == 3:
221
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
222
+ exclude_frozen_parameters)
223
+
224
+
225
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
+     if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+         return
+
+     frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+     frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+     if debug:
+         num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+         print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+     wanted_params = len(frozen_param_shapes)
+     wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+     avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+     print(f'Frozen params: Have {avail_numel} numels to process.')
+     print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+     total_params = 0
+     total_numel = 0
+     for name, shape in frozen_param_shapes.items():
+         total_params += 1
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+
+         state_dict[name] = frozen_param_fragments[name]
+
+         if debug:
+             print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+     print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _has_callable(obj, fn):
+     attr = getattr(obj, fn, None)
+     return callable(attr)
+
+
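``_has_callable`` exists because recorded shapes may be ``torch.Size`` objects (which expose ``numel()``) or plain sequences (which do not); the zero-2 merge below guards on exactly this. A quick sanity check ::

    import math
    import torch
    assert _has_callable(torch.Size([2, 3]), 'numel')   # torch.Size -> use .numel()
    assert not _has_callable([2, 3], 'numel')           # plain list -> fall back to math.prod
    assert torch.Size([2, 3]).numel() == math.prod([2, 3]) == 6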
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+     param_shapes = zero_model_states[0].param_shapes
+
+     # Reconstruction protocol: each rank holds one contiguous partition of every flattened fp32
+     # param group; concatenating the partitions from all ranks in rank order restores each full
+     # flat group, from which the individual params are then sliced out sequentially by offset.
+
+     if debug:
+         for i in range(world_size):
+             for j in range(len(fp32_flat_groups[0])):
+                 print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+     # XXX: memory usage doubles here (zero2)
+     num_param_groups = len(fp32_flat_groups[0])
+     merged_single_partition_of_fp32_groups = []
+     for i in range(num_param_groups):
+         merged_partitions = [sd[i] for sd in fp32_flat_groups]
+         full_single_fp32_vector = torch.cat(merged_partitions, 0)
+         merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+     avail_numel = sum(
+         [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+     if debug:
+         wanted_params = sum([len(shapes) for shapes in param_shapes])
+         wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+         # not asserting if there is a mismatch due to possible padding
+         print(f"Have {avail_numel} numels to process.")
+         print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+     # out-of-core computing solution
+     total_numel = 0
+     total_params = 0
+     for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+         offset = 0
+         avail_numel = full_single_fp32_vector.numel()
+         for name, shape in shapes.items():
+
+             unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+             total_numel += unpartitioned_numel
+             total_params += 1
+
+             if debug:
+                 print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+             state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+             offset += unpartitioned_numel
+
+         # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+         # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+         # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+         # live optimizer object, so we are checking that the numbers are within the right range
+         align_to = 2 * world_size
+
+         def zero2_align(x):
+             return align_to * math.ceil(x / align_to)
+
+         if debug:
+             print(f"original offset={offset}, avail_numel={avail_numel}")
+
+         offset = zero2_align(offset)
+         avail_numel = zero2_align(avail_numel)
+
+         if debug:
+             print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+         # Sanity check
+         if offset != avail_numel:
+             raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
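A toy walk-through of the slice-by-offset protocol above, with hypothetical names and sizes ::

    import torch
    flat = torch.arange(10, dtype=torch.float32)              # one merged fp32 group
    shapes = {"weight": torch.Size([2, 3]), "bias": torch.Size([4])}
    offset = 0
    for name, shape in shapes.items():
        n = shape.numel()
        print(name, flat.narrow(0, offset, n).view(shape))
        offset += n
    assert offset == flat.numel()                             # all numels consumed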
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                                exclude_frozen_parameters):
+     state_dict = OrderedDict()
+
+     # buffers
+     buffers = zero_model_states[0].buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     if not exclude_frozen_parameters:
+         _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+     _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+     # recover shared parameters
+     for pair in zero_model_states[0].shared_params:
+         if pair[1] in state_dict:
+             state_dict[pair[0]] = state_dict[pair[1]]
+
+     return state_dict
+
+
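The shared-parameter recovery loop above re-ties weight aliases by name. A small illustration with hypothetical tied-embedding names ::

    import torch
    state_dict = {"model.embed_tokens.weight": torch.zeros(4, 2)}
    shared_params = [["lm_head.weight", "model.embed_tokens.weight"]]  # [alias, source] pairs
    for pair in shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]
    assert state_dict["lm_head.weight"] is state_dict["model.embed_tokens.weight"]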
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+     remainder = unpartitioned_numel % world_size
+     padding_numel = (world_size - remainder) if remainder else 0
+     partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+     return partitioned_numel, padding_numel
+
+
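A worked example of the padding arithmetic: with ``unpartitioned_numel=10`` and ``world_size=4`` the remainder is 2, so two pad elements bring the total to 12, i.e. four partitions of ``ceil(10/4)=3`` elements each ::

    assert zero3_partitioned_param_info(10, 4) == (3, 2)   # 4 * 3 == 10 + 2
    assert zero3_partitioned_param_info(8, 4) == (2, 0)    # divides evenly, no padding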
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+     if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+         return
+
+     if debug:
+         for i in range(world_size):
+             num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+             print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+     frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+     wanted_params = len(frozen_param_shapes)
+     wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+     avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+     print(f'Frozen params: Have {avail_numel} numels to process.')
+     print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+     total_params = 0
+     total_numel = 0
+     for name, shape in zero_model_states[0].frozen_param_shapes.items():
+         total_params += 1
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+
+         param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+         state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+         if debug:
+             print(
+                 f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+             )
+
+     print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+     param_shapes = zero_model_states[0].param_shapes
+     avail_numel = fp32_flat_groups[0].numel() * world_size
+     # Reconstruction protocol: For zero3 we need to zip the partitions together at the boundary of
+     # each param, re-consolidating each param, while dealing with padding if any
+
+     # merge list of dicts, preserving order
+     param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+     if debug:
+         for i in range(world_size):
+             print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+     wanted_params = len(param_shapes)
+     wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+     # not asserting if there is a mismatch due to possible padding
+     avail_numel = fp32_flat_groups[0].numel() * world_size
+     print(f"Trainable params: Have {avail_numel} numels to process.")
+     print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+     # out-of-core computing solution
+     offset = 0
+     total_numel = 0
+     total_params = 0
+     for name, shape in tqdm(param_shapes.items(), desc='Gathering Sharded Weights'):
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+         total_params += 1
+         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+         if debug:
+             print(
+                 f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+             )
+
+         # XXX: memory usage doubles here
+         state_dict[name] = torch.cat(
+             tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+             0).narrow(0, 0, unpartitioned_numel).view(shape)
+         offset += partitioned_numel
+
+     offset *= world_size
+
+     # Sanity check
+     if offset != avail_numel:
+         raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
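The cat-then-trim pattern above, shown on toy data: ``world_size=2`` and a single 3-element param, so each rank holds a 2-element partition and the last slot on rank 1 is padding ::

    import torch
    fp32_flat_groups = [torch.tensor([1., 2.]), torch.tensor([3., 0.])]  # rank 0, rank 1
    partitioned_numel, unpartitioned_numel = 2, 3
    param = torch.cat(
        tuple(fp32_flat_groups[i].narrow(0, 0, partitioned_numel) for i in range(2)),
        0).narrow(0, 0, unpartitioned_numel).view(3)
    assert torch.equal(param, torch.tensor([1., 2., 3.]))   # padding dropped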
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                                exclude_frozen_parameters):
+     state_dict = OrderedDict()
+
+     # buffers
+     buffers = zero_model_states[0].buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     if not exclude_frozen_parameters:
+         _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+     _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+     # recover shared parameters
+     for pair in zero_model_states[0].shared_params:
+         if pair[1] in state_dict:
+             state_dict[pair[0]] = state_dict[pair[1]]
+
+     return state_dict
+
+
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+     ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+     via a model hub.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder
+         - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the 'latest' file, e.g., ``global_step14``
+         - ``exclude_frozen_parameters``: exclude frozen parameters
+
+     Returns:
+         - pytorch ``state_dict``
+
+     Note: this approach may not work if your application doesn't have sufficient free CPU memory, and
+     you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+     the checkpoint.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+         # do the training and checkpoint saving
+         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir)  # already on cpu
+         model = model.cpu()  # move to cpu
+         model.load_state_dict(state_dict)
+         # submit to model hub or save the model to share with others
+
+     In this example the ``model`` will no longer be usable in the deepspeed context of the same
+     application, i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+     """
+     if tag is None:
+         latest_path = os.path.join(checkpoint_dir, 'latest')
+         if os.path.isfile(latest_path):
+             with open(latest_path, 'r') as fd:
+                 tag = fd.read().strip()
+         else:
+             raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+     ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+     if not os.path.isdir(ds_checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+     return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+                                                output_dir,
+                                                max_shard_size="5GB",
+                                                safe_serialization=False,
+                                                tag=None,
+                                                exclude_frozen_parameters=False):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the tag-folder, like ``global_step14``)
+         - ``output_dir``: directory for the pytorch fp32 state_dict output files
+         - ``max_shard_size``: the maximum size for a checkpoint before being sharded; default value is 5GB
+         - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`)
+         - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+         - ``exclude_frozen_parameters``: exclude frozen parameters
+     """
+     # Dependency pre-check
+     if safe_serialization:
+         try:
+             from safetensors.torch import save_file
+         except ImportError:
+             print('If you want to use `safe_serialization`, please `pip install safetensors`')
+             raise
+     if max_shard_size is not None:
+         try:
+             from huggingface_hub import split_torch_state_dict_into_shards
+         except ImportError:
+             print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+             raise
+
+     # Convert zero checkpoint to state_dict
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
+
+     # Shard the model if it is too big.
+     weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+     if max_shard_size is not None:
+         filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+         state_dict_split = split_torch_state_dict_into_shards(state_dict,
+                                                               filename_pattern=filename_pattern,
+                                                               max_shard_size=max_shard_size)
+     else:
+         from collections import namedtuple
+         StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+         state_dict_split = StateDictSplit(is_sharded=False,
+                                           filename_to_tensors={weights_name: list(state_dict.keys())})
+
+     # Save the model
+     filename_to_tensors = state_dict_split.filename_to_tensors.items()
+     for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+         shard = {tensor: state_dict[tensor].contiguous() for tensor in tensors}
+         output_path = os.path.join(output_dir, shard_file)
+         if safe_serialization:
+             save_file(shard, output_path, metadata={"format": "pt"})
+         else:
+             torch.save(shard, output_path)
+
+     # Save index if sharded
+     if state_dict_split.is_sharded:
+         index = {
+             "metadata": state_dict_split.metadata,
+             "weight_map": state_dict_split.tensor_to_filename,
+         }
+         save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+         save_index_file = os.path.join(output_dir, save_index_file)
+         with open(save_index_file, "w", encoding="utf-8") as f:
+             content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+             f.write(content)
+
+
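A typical programmatic call of the function above might be (placeholder paths, and assuming the function is imported from this script) ::

    from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
    # note: as written above, output_dir is not created by the function, so it
    # should already exist before the call
    convert_zero_checkpoint_to_fp32_state_dict("path/checkpoint-12",
                                               "path/checkpoint-12-output",
                                               max_shard_size="5GB",
                                               safe_serialization=True)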
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+     """
+     1. Put the provided model to cpu
+     2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+     3. Load it into the provided model
+
+     Args:
+         - ``model``: the model object to update
+         - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the tag-folder, like ``global_step14``)
+         - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+     Returns:
+         - ``model``: modified model
+
+     Make sure you have plenty of CPU memory available before you call this function. If you don't
+     have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+     conveniently placed for you in the checkpoint folder.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+         # submit to model hub or save the model to share with others
+
+     Note that once this was run, the ``model`` will no longer be usable in the deepspeed context
+     of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     """
+     logger.info(f"Extracting fp32 weights")
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+     logger.info(f"Overwriting model with fp32 weights")
+     model = model.cpu()
+     model.load_state_dict(state_dict, strict=False)
+
+     return model
+
+
+ if __name__ == "__main__":
637
+ parser = argparse.ArgumentParser()
638
+ parser.add_argument("checkpoint_dir",
639
+ type=str,
640
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
641
+ parser.add_argument("output_dir",
642
+ type=str,
643
+ help="directory to the pytorch fp32 state_dict output files"
644
+ "(e.g. path/checkpoint-12-output/)")
645
+ parser.add_argument(
646
+ "--max_shard_size",
647
+ type=str,
648
+ default="5GB",
649
+ help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
650
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
651
+ "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
652
+ "without CPU OOM issues.")
653
+ parser.add_argument(
654
+ "--safe_serialization",
655
+ default=False,
656
+ action='store_true',
657
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
658
+ parser.add_argument("-t",
659
+ "--tag",
660
+ type=str,
661
+ default=None,
662
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
663
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
664
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
665
+ args = parser.parse_args()
666
+
667
+ debug = args.debug
668
+
669
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
670
+ args.output_dir,
671
+ max_shard_size=args.max_shard_size,
672
+ safe_serialization=args.safe_serialization,
673
+ tag=args.tag,
674
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
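For reference, a typical command-line invocation of this script (placeholder paths) would be ::

    python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12-output/ \
        --max_shard_size 5GB --safe_serialization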