BKM1804 committed
Commit 0568210 · verified · 1 parent: 8e65dee

Upload task output 5846ca31-dcde-4604-917f-3e562a84c4c9
added_tokens.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "<|assistant|>": 32001,
+ "<|endoftext|>": 32000,
+ "<|end|>": 32007,
+ "<|placeholder1|>": 32002,
+ "<|placeholder2|>": 32003,
+ "<|placeholder3|>": 32004,
+ "<|placeholder4|>": 32005,
+ "<|placeholder5|>": 32008,
+ "<|placeholder6|>": 32009,
+ "<|system|>": 32006,
+ "<|user|>": 32010
+ }
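
The mapping above adds the Phi-3 chat-control tokens on top of the 32,000-entry base vocabulary. A minimal sketch (not part of this commit) of checking that a tokenizer loaded from these files resolves the tokens to the listed ids; the checkpoint path is a placeholder:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-checkpoint")  # placeholder path
for token in ["<|endoftext|>", "<|assistant|>", "<|end|>", "<|user|>"]:
    # Expected ids per added_tokens.json: 32000, 32001, 32007, 32010
    print(token, tok.convert_tokens_to_ids(token))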
config.json ADDED
@@ -0,0 +1,138 @@
+ {
+ "architectures": [
+ "Phi3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "auto_map": {
+ "AutoConfig": "configuration_phi3.Phi3Config",
+ "AutoModelForCausalLM": "modeling_phi3.Phi3ForCausalLM"
+ },
+ "bos_token_id": 1,
+ "embd_pdrop": 0.0,
+ "eos_token_id": 32000,
+ "hidden_act": "silu",
+ "hidden_size": 3072,
+ "initializer_range": 0.02,
+ "intermediate_size": 8192,
+ "max_position_embeddings": 131072,
+ "model_type": "phi3",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 32,
+ "original_max_position_embeddings": 4096,
+ "pad_token_id": 32000,
+ "partial_rotary_factor": 1.0,
+ "resid_pdrop": 0.0,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "long_factor": [
+ 1.0700000524520874,
+ 1.1200000047683716,
+ 1.149999976158142,
+ 1.4199999570846558,
+ 1.5699999332427979,
+ 1.7999999523162842,
+ 2.129999876022339,
+ 2.129999876022339,
+ 3.009999990463257,
+ 5.910000324249268,
+ 6.950000286102295,
+ 9.070000648498535,
+ 9.930000305175781,
+ 10.710000038146973,
+ 11.130000114440918,
+ 14.609999656677246,
+ 15.409998893737793,
+ 19.809999465942383,
+ 37.279998779296875,
+ 38.279998779296875,
+ 38.599998474121094,
+ 40.12000274658203,
+ 46.20000457763672,
+ 50.940006256103516,
+ 53.66000747680664,
+ 54.9373893737793,
+ 56.89738845825195,
+ 57.28738784790039,
+ 59.98738479614258,
+ 60.86738586425781,
+ 60.887386322021484,
+ 61.71739196777344,
+ 62.91739273071289,
+ 62.957393646240234,
+ 63.41739273071289,
+ 63.8173942565918,
+ 63.83739471435547,
+ 63.897396087646484,
+ 63.93739700317383,
+ 64.06739807128906,
+ 64.11434936523438,
+ 64.12435150146484,
+ 64.15435028076172,
+ 64.19435119628906,
+ 64.24435424804688,
+ 64.57435607910156,
+ 64.69000244140625,
+ 64.76000213623047
+ ],
+ "short_factor": [
+ 1.1,
+ 1.1,
+ 1.1,
+ 1.3000000000000003,
+ 1.3500000000000003,
+ 1.3500000000000003,
+ 1.4000000000000004,
+ 1.5500000000000005,
+ 2.000000000000001,
+ 2.000000000000001,
+ 2.000000000000001,
+ 2.000000000000001,
+ 2.000000000000001,
+ 2.000000000000001,
+ 2.000000000000001,
+ 2.000000000000001,
+ 2.000000000000001,
+ 2.000000000000001,
+ 2.000000000000001,
+ 2.000000000000001,
+ 2.000000000000001,
+ 2.000000000000001,
+ 2.000000000000001,
+ 2.000000000000001,
+ 2.000000000000001,
+ 2.0500000000000007,
+ 2.0500000000000007,
+ 2.0500000000000007,
+ 2.0500000000000007,
+ 2.0500000000000007,
+ 2.0500000000000007,
+ 2.1000000000000005,
+ 2.1000000000000005,
+ 2.1500000000000004,
+ 2.25,
+ 2.25,
+ 2.25,
+ 2.25,
+ 2.25,
+ 2.3999999999999995,
+ 2.4499999999999993,
+ 2.499999999999999,
+ 2.6999999999999984,
+ 2.6999999999999984,
+ 2.7499999999999982,
+ 2.799999999999998,
+ 2.8999999999999977,
+ 3.049999999999997
+ ],
+ "type": "longrope"
+ },
+ "rope_theta": 10000.0,
+ "sliding_window": 262144,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.51.3",
+ "use_cache": false,
+ "vocab_size": 32064
+ }
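
The config describes a 32-layer, 3072-hidden Phi-3 model with LongRoPE scaling to 131,072 positions and custom code referenced through auto_map. A minimal loading sketch, assuming the files in this commit sit in a local directory or Hub repo (the path below is a placeholder):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "path/to/this-checkpoint"  # placeholder
model = AutoModelForCausalLM.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,   # matches "torch_dtype": "bfloat16"
    trust_remote_code=True,       # auto_map points at configuration_phi3 / modeling_phi3
)
tokenizer = AutoTokenizer.from_pretrained(path)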
generation_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": [
+ 32000,
+ 32001,
+ 32007
+ ],
+ "pad_token_id": 32000,
+ "transformers_version": "4.51.3"
+ }
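
Generation stops on any of the three ids above (<|endoftext|>, <|assistant|>, <|end|>). A short sketch, not part of the commit, of inspecting these saved defaults; the path is again a placeholder:

from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("path/to/this-checkpoint")  # placeholder
print(gen_cfg.eos_token_id)  # [32000, 32001, 32007]
print(gen_cfg.pad_token_id)  # 32000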
loss.txt ADDED
@@ -0,0 +1 @@
+ 1101,1.6452035903930664
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f30dfbd12cc5435a6a703c704915b534122531eb5dde67864909155adf975e8
+ size 4972489328
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d03e7dacf17aea66a7be1fd65aa364a1ee422a0029d8bd024dfae1f6001cd7d
+ size 2669692552
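
Both shard entries are Git LFS pointers: the repository stores only the object's sha256 and byte size, and the real safetensors files are fetched with `git lfs pull`. A small verification sketch (assuming the downloaded shard sits in the working directory under the same name):

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file so multi-gigabyte shards do not need to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# Expected: 2d03e7dacf17aea66a7be1fd65aa364a1ee422a0029d8bd024dfae1f6001cd7d
print(sha256_of("model-00002-of-00002.safetensors"))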
model.safetensors.index.json ADDED
@@ -0,0 +1,202 @@
+ {
+ "metadata": {
+ "total_size": 7642159104
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.28.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.28.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.29.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.29.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.30.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.30.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.31.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.31.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+ }
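
The index's weight_map records which of the two shards holds each tensor (layers 0-20 in the first shard, layer 21 split, layers 22-31 in the second). A sketch of using it to read a single weight without materializing the whole model, assuming the shards have been pulled into the working directory:

import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.0.self_attn.qkv_proj.weight"
shard = index["weight_map"][name]  # e.g. "model-00001-of-00002.safetensors"
with safe_open(shard, framework="pt") as shard_file:
    tensor = shard_file.get_tensor(name)
print(shard, tuple(tensor.shape))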
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,132 @@
+ {
+ "add_bos_token": false,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": false
+ },
+ "32000": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32001": {
+ "content": "<|assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32002": {
+ "content": "<|placeholder1|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32003": {
+ "content": "<|placeholder2|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32004": {
+ "content": "<|placeholder3|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32005": {
+ "content": "<|placeholder4|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32006": {
+ "content": "<|system|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32007": {
+ "content": "<|end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32008": {
+ "content": "<|placeholder5|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32009": {
+ "content": "<|placeholder6|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32010": {
+ "content": "<|user|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{'<|system|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'user' %}{{'<|user|>\n' + message['content'] + '<|end|>\n'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|endoftext|>",
+ "extra_special_tokens": {},
+ "legacy": false,
+ "model_max_length": 131072,
+ "pad_token": "<|endoftext|>",
+ "padding_side": "left",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
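
The chat_template above produces the Phi-3 prompt layout (<|system|>, <|user|>, <|assistant|> turns, each closed by <|end|>). A usage sketch combining it with the generation defaults from this commit; loading mirrors the earlier sketch and the checkpoint path is a placeholder:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "path/to/this-checkpoint"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16, trust_remote_code=True)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What does a safetensors index file do?"},
]
# add_generation_prompt=True appends the trailing "<|assistant|>\n" from the template.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128, eos_token_id=[32000, 32001, 32007])
print(tokenizer.decode(output[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))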
trainer_state.json ADDED
@@ -0,0 +1,1606 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 0.9990925589836661,
6
+ "eval_steps": 500,
7
+ "global_step": 1101,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.004537205081669692,
14
+ "grad_norm": 1.46875,
15
+ "learning_rate": 1.6907951020408165e-05,
16
+ "loss": 2.0714,
17
+ "step": 5
18
+ },
19
+ {
20
+ "epoch": 0.009074410163339383,
21
+ "grad_norm": 0.921875,
22
+ "learning_rate": 3.804288979591837e-05,
23
+ "loss": 1.9914,
24
+ "step": 10
25
+ },
26
+ {
27
+ "epoch": 0.013611615245009074,
28
+ "grad_norm": 0.85546875,
29
+ "learning_rate": 5.917782857142858e-05,
30
+ "loss": 1.9559,
31
+ "step": 15
32
+ },
33
+ {
34
+ "epoch": 0.018148820326678767,
35
+ "grad_norm": 0.9453125,
36
+ "learning_rate": 8.031276734693878e-05,
37
+ "loss": 1.9009,
38
+ "step": 20
39
+ },
40
+ {
41
+ "epoch": 0.022686025408348458,
42
+ "grad_norm": 0.890625,
43
+ "learning_rate": 0.000101447706122449,
44
+ "loss": 1.8664,
45
+ "step": 25
46
+ },
47
+ {
48
+ "epoch": 0.02722323049001815,
49
+ "grad_norm": 0.703125,
50
+ "learning_rate": 0.0001225826448979592,
51
+ "loss": 1.9462,
52
+ "step": 30
53
+ },
54
+ {
55
+ "epoch": 0.03176043557168784,
56
+ "grad_norm": 0.75390625,
57
+ "learning_rate": 0.0001437175836734694,
58
+ "loss": 1.915,
59
+ "step": 35
60
+ },
61
+ {
62
+ "epoch": 0.036297640653357534,
63
+ "grad_norm": 0.6953125,
64
+ "learning_rate": 0.00014794416201860397,
65
+ "loss": 1.8992,
66
+ "step": 40
67
+ },
68
+ {
69
+ "epoch": 0.04083484573502722,
70
+ "grad_norm": 0.73046875,
71
+ "learning_rate": 0.00014794249880096695,
72
+ "loss": 1.9069,
73
+ "step": 45
74
+ },
75
+ {
76
+ "epoch": 0.045372050816696916,
77
+ "grad_norm": 0.69140625,
78
+ "learning_rate": 0.00014793955622586344,
79
+ "loss": 1.9164,
80
+ "step": 50
81
+ },
82
+ {
83
+ "epoch": 0.0499092558983666,
84
+ "grad_norm": 0.65234375,
85
+ "learning_rate": 0.00014793533436115207,
86
+ "loss": 1.8856,
87
+ "step": 55
88
+ },
89
+ {
90
+ "epoch": 0.0544464609800363,
91
+ "grad_norm": 0.6796875,
92
+ "learning_rate": 0.0001479298333041932,
93
+ "loss": 1.8771,
94
+ "step": 60
95
+ },
96
+ {
97
+ "epoch": 0.05898366606170599,
98
+ "grad_norm": 0.65234375,
99
+ "learning_rate": 0.00014792305318184665,
100
+ "loss": 1.8856,
101
+ "step": 65
102
+ },
103
+ {
104
+ "epoch": 0.06352087114337568,
105
+ "grad_norm": 0.66796875,
106
+ "learning_rate": 0.00014791499415046867,
107
+ "loss": 1.8609,
108
+ "step": 70
109
+ },
110
+ {
111
+ "epoch": 0.06805807622504537,
112
+ "grad_norm": 0.6796875,
113
+ "learning_rate": 0.00014790565639590848,
114
+ "loss": 1.937,
115
+ "step": 75
116
+ },
117
+ {
118
+ "epoch": 0.07259528130671507,
119
+ "grad_norm": 0.79296875,
120
+ "learning_rate": 0.00014789504013350388,
121
+ "loss": 1.9239,
122
+ "step": 80
123
+ },
124
+ {
125
+ "epoch": 0.07713248638838476,
126
+ "grad_norm": 0.6171875,
127
+ "learning_rate": 0.00014788314560807632,
128
+ "loss": 1.9156,
129
+ "step": 85
130
+ },
131
+ {
132
+ "epoch": 0.08166969147005444,
133
+ "grad_norm": 0.63671875,
134
+ "learning_rate": 0.00014786997309392523,
135
+ "loss": 1.9013,
136
+ "step": 90
137
+ },
138
+ {
139
+ "epoch": 0.08620689655172414,
140
+ "grad_norm": 0.6640625,
141
+ "learning_rate": 0.00014785552289482183,
142
+ "loss": 1.8661,
143
+ "step": 95
144
+ },
145
+ {
146
+ "epoch": 0.09074410163339383,
147
+ "grad_norm": 0.6328125,
148
+ "learning_rate": 0.00014783979534400182,
149
+ "loss": 1.8538,
150
+ "step": 100
151
+ },
152
+ {
153
+ "epoch": 0.09528130671506352,
154
+ "grad_norm": 0.625,
155
+ "learning_rate": 0.0001478227908041581,
156
+ "loss": 1.8926,
157
+ "step": 105
158
+ },
159
+ {
160
+ "epoch": 0.0998185117967332,
161
+ "grad_norm": 0.62890625,
162
+ "learning_rate": 0.00014780450966743198,
163
+ "loss": 1.8817,
164
+ "step": 110
165
+ },
166
+ {
167
+ "epoch": 0.10435571687840291,
168
+ "grad_norm": 0.60546875,
169
+ "learning_rate": 0.00014778495235540456,
170
+ "loss": 1.8889,
171
+ "step": 115
172
+ },
173
+ {
174
+ "epoch": 0.1088929219600726,
175
+ "grad_norm": 0.62109375,
176
+ "learning_rate": 0.00014776411931908664,
177
+ "loss": 1.8341,
178
+ "step": 120
179
+ },
180
+ {
181
+ "epoch": 0.11343012704174228,
182
+ "grad_norm": 0.62109375,
183
+ "learning_rate": 0.00014774201103890853,
184
+ "loss": 1.8489,
185
+ "step": 125
186
+ },
187
+ {
188
+ "epoch": 0.11796733212341198,
189
+ "grad_norm": 0.66796875,
190
+ "learning_rate": 0.00014771862802470895,
191
+ "loss": 1.8688,
192
+ "step": 130
193
+ },
194
+ {
195
+ "epoch": 0.12250453720508167,
196
+ "grad_norm": 0.69140625,
197
+ "learning_rate": 0.00014769397081572318,
198
+ "loss": 1.8304,
199
+ "step": 135
200
+ },
201
+ {
202
+ "epoch": 0.12704174228675136,
203
+ "grad_norm": 0.62109375,
204
+ "learning_rate": 0.00014766803998057077,
205
+ "loss": 1.8114,
206
+ "step": 140
207
+ },
208
+ {
209
+ "epoch": 0.13157894736842105,
210
+ "grad_norm": 0.69140625,
211
+ "learning_rate": 0.00014764083611724224,
212
+ "loss": 1.8612,
213
+ "step": 145
214
+ },
215
+ {
216
+ "epoch": 0.13611615245009073,
217
+ "grad_norm": 0.8828125,
218
+ "learning_rate": 0.00014761235985308546,
219
+ "loss": 1.8585,
220
+ "step": 150
221
+ },
222
+ {
223
+ "epoch": 0.14065335753176045,
224
+ "grad_norm": 0.67578125,
225
+ "learning_rate": 0.00014758261184479108,
226
+ "loss": 1.8307,
227
+ "step": 155
228
+ },
229
+ {
230
+ "epoch": 0.14519056261343014,
231
+ "grad_norm": 0.6328125,
232
+ "learning_rate": 0.0001475515927783775,
233
+ "loss": 1.9006,
234
+ "step": 160
235
+ },
236
+ {
237
+ "epoch": 0.14972776769509982,
238
+ "grad_norm": 0.9921875,
239
+ "learning_rate": 0.00014751930336917481,
240
+ "loss": 1.8725,
241
+ "step": 165
242
+ },
243
+ {
244
+ "epoch": 0.1542649727767695,
245
+ "grad_norm": 0.58984375,
246
+ "learning_rate": 0.00014748574436180864,
247
+ "loss": 1.8519,
248
+ "step": 170
249
+ },
250
+ {
251
+ "epoch": 0.1588021778584392,
252
+ "grad_norm": 0.609375,
253
+ "learning_rate": 0.00014745091653018267,
254
+ "loss": 1.8826,
255
+ "step": 175
256
+ },
257
+ {
258
+ "epoch": 0.16333938294010888,
259
+ "grad_norm": 0.59765625,
260
+ "learning_rate": 0.00014741482067746097,
261
+ "loss": 1.8518,
262
+ "step": 180
263
+ },
264
+ {
265
+ "epoch": 0.16787658802177857,
266
+ "grad_norm": 0.61328125,
267
+ "learning_rate": 0.00014737745763604944,
268
+ "loss": 1.9203,
269
+ "step": 185
270
+ },
271
+ {
272
+ "epoch": 0.1724137931034483,
273
+ "grad_norm": 0.63671875,
274
+ "learning_rate": 0.00014733882826757655,
275
+ "loss": 1.849,
276
+ "step": 190
277
+ },
278
+ {
279
+ "epoch": 0.17695099818511797,
280
+ "grad_norm": 0.62109375,
281
+ "learning_rate": 0.00014729893346287354,
282
+ "loss": 1.8868,
283
+ "step": 195
284
+ },
285
+ {
286
+ "epoch": 0.18148820326678766,
287
+ "grad_norm": 0.60546875,
288
+ "learning_rate": 0.00014725777414195383,
289
+ "loss": 1.8464,
290
+ "step": 200
291
+ },
292
+ {
293
+ "epoch": 0.18602540834845735,
294
+ "grad_norm": 0.796875,
295
+ "learning_rate": 0.00014721535125399195,
296
+ "loss": 1.8716,
297
+ "step": 205
298
+ },
299
+ {
300
+ "epoch": 0.19056261343012704,
301
+ "grad_norm": 0.6171875,
302
+ "learning_rate": 0.0001471716657773013,
303
+ "loss": 1.879,
304
+ "step": 210
305
+ },
306
+ {
307
+ "epoch": 0.19509981851179672,
308
+ "grad_norm": 0.5625,
309
+ "learning_rate": 0.00014712671871931207,
310
+ "loss": 1.8664,
311
+ "step": 215
312
+ },
313
+ {
314
+ "epoch": 0.1996370235934664,
315
+ "grad_norm": 0.6171875,
316
+ "learning_rate": 0.00014708051111654756,
317
+ "loss": 1.8891,
318
+ "step": 220
319
+ },
320
+ {
321
+ "epoch": 0.20417422867513613,
322
+ "grad_norm": 0.55859375,
323
+ "learning_rate": 0.00014703304403460062,
324
+ "loss": 1.8699,
325
+ "step": 225
326
+ },
327
+ {
328
+ "epoch": 0.20871143375680581,
329
+ "grad_norm": 0.58203125,
330
+ "learning_rate": 0.00014698431856810878,
331
+ "loss": 1.7784,
332
+ "step": 230
333
+ },
334
+ {
335
+ "epoch": 0.2132486388384755,
336
+ "grad_norm": 0.83984375,
337
+ "learning_rate": 0.00014693433584072926,
338
+ "loss": 1.8291,
339
+ "step": 235
340
+ },
341
+ {
342
+ "epoch": 0.2177858439201452,
343
+ "grad_norm": 0.56640625,
344
+ "learning_rate": 0.00014688309700511298,
345
+ "loss": 1.831,
346
+ "step": 240
347
+ },
348
+ {
349
+ "epoch": 0.22232304900181488,
350
+ "grad_norm": 0.58984375,
351
+ "learning_rate": 0.00014683060324287783,
352
+ "loss": 1.8502,
353
+ "step": 245
354
+ },
355
+ {
356
+ "epoch": 0.22686025408348456,
357
+ "grad_norm": 0.5859375,
358
+ "learning_rate": 0.00014677685576458164,
359
+ "loss": 1.8986,
360
+ "step": 250
361
+ },
362
+ {
363
+ "epoch": 0.23139745916515425,
364
+ "grad_norm": 0.5859375,
365
+ "learning_rate": 0.00014672185580969416,
366
+ "loss": 1.7958,
367
+ "step": 255
368
+ },
369
+ {
370
+ "epoch": 0.23593466424682397,
371
+ "grad_norm": 0.578125,
372
+ "learning_rate": 0.00014666560464656842,
373
+ "loss": 1.8392,
374
+ "step": 260
375
+ },
376
+ {
377
+ "epoch": 0.24047186932849365,
378
+ "grad_norm": 0.67578125,
379
+ "learning_rate": 0.0001466081035724116,
380
+ "loss": 1.8503,
381
+ "step": 265
382
+ },
383
+ {
384
+ "epoch": 0.24500907441016334,
385
+ "grad_norm": 0.6015625,
386
+ "learning_rate": 0.00014654935391325503,
387
+ "loss": 1.8042,
388
+ "step": 270
389
+ },
390
+ {
391
+ "epoch": 0.24954627949183303,
392
+ "grad_norm": 0.6171875,
393
+ "learning_rate": 0.0001464893570239237,
394
+ "loss": 1.8489,
395
+ "step": 275
396
+ },
397
+ {
398
+ "epoch": 0.2540834845735027,
399
+ "grad_norm": 0.56640625,
400
+ "learning_rate": 0.00014642811428800486,
401
+ "loss": 1.786,
402
+ "step": 280
403
+ },
404
+ {
405
+ "epoch": 0.25862068965517243,
406
+ "grad_norm": 0.578125,
407
+ "learning_rate": 0.00014636562711781623,
408
+ "loss": 1.7963,
409
+ "step": 285
410
+ },
411
+ {
412
+ "epoch": 0.2631578947368421,
413
+ "grad_norm": 0.70703125,
414
+ "learning_rate": 0.00014630189695437348,
415
+ "loss": 1.8334,
416
+ "step": 290
417
+ },
418
+ {
419
+ "epoch": 0.2676950998185118,
420
+ "grad_norm": 0.515625,
421
+ "learning_rate": 0.00014623692526735687,
422
+ "loss": 1.8442,
423
+ "step": 295
424
+ },
425
+ {
426
+ "epoch": 0.27223230490018147,
427
+ "grad_norm": 0.57421875,
428
+ "learning_rate": 0.0001461707135550774,
429
+ "loss": 1.8511,
430
+ "step": 300
431
+ },
432
+ {
433
+ "epoch": 0.2767695099818512,
434
+ "grad_norm": 1.2734375,
435
+ "learning_rate": 0.0001461032633444423,
436
+ "loss": 1.8057,
437
+ "step": 305
438
+ },
439
+ {
440
+ "epoch": 0.2813067150635209,
441
+ "grad_norm": 0.64453125,
442
+ "learning_rate": 0.00014603457619091978,
443
+ "loss": 1.8314,
444
+ "step": 310
445
+ },
446
+ {
447
+ "epoch": 0.28584392014519056,
448
+ "grad_norm": 0.5703125,
449
+ "learning_rate": 0.00014596465367850323,
450
+ "loss": 1.8663,
451
+ "step": 315
452
+ },
453
+ {
454
+ "epoch": 0.29038112522686027,
455
+ "grad_norm": 0.58984375,
456
+ "learning_rate": 0.0001458934974196745,
457
+ "loss": 1.8317,
458
+ "step": 320
459
+ },
460
+ {
461
+ "epoch": 0.29491833030852993,
462
+ "grad_norm": 0.53125,
463
+ "learning_rate": 0.000145821109055367,
464
+ "loss": 1.8226,
465
+ "step": 325
466
+ },
467
+ {
468
+ "epoch": 0.29945553539019965,
469
+ "grad_norm": 0.55078125,
470
+ "learning_rate": 0.00014574749025492755,
471
+ "loss": 1.806,
472
+ "step": 330
473
+ },
474
+ {
475
+ "epoch": 0.3039927404718693,
476
+ "grad_norm": 0.703125,
477
+ "learning_rate": 0.0001456726427160782,
478
+ "loss": 1.8697,
479
+ "step": 335
480
+ },
481
+ {
482
+ "epoch": 0.308529945553539,
483
+ "grad_norm": 0.5859375,
484
+ "learning_rate": 0.00014559656816487678,
485
+ "loss": 1.8708,
486
+ "step": 340
487
+ },
488
+ {
489
+ "epoch": 0.31306715063520874,
490
+ "grad_norm": 0.64453125,
491
+ "learning_rate": 0.0001455192683556773,
492
+ "loss": 1.7742,
493
+ "step": 345
494
+ },
495
+ {
496
+ "epoch": 0.3176043557168784,
497
+ "grad_norm": 0.56640625,
498
+ "learning_rate": 0.0001454407450710894,
499
+ "loss": 1.8335,
500
+ "step": 350
501
+ },
502
+ {
503
+ "epoch": 0.3221415607985481,
504
+ "grad_norm": 0.5390625,
505
+ "learning_rate": 0.00014536100012193726,
506
+ "loss": 1.8305,
507
+ "step": 355
508
+ },
509
+ {
510
+ "epoch": 0.32667876588021777,
511
+ "grad_norm": 0.6953125,
512
+ "learning_rate": 0.0001452800353472179,
513
+ "loss": 1.8292,
514
+ "step": 360
515
+ },
516
+ {
517
+ "epoch": 0.3312159709618875,
518
+ "grad_norm": 0.53125,
519
+ "learning_rate": 0.00014519785261405869,
520
+ "loss": 1.8466,
521
+ "step": 365
522
+ },
523
+ {
524
+ "epoch": 0.33575317604355714,
525
+ "grad_norm": 0.53125,
526
+ "learning_rate": 0.00014511445381767423,
527
+ "loss": 1.8178,
528
+ "step": 370
529
+ },
530
+ {
531
+ "epoch": 0.34029038112522686,
532
+ "grad_norm": 0.5234375,
533
+ "learning_rate": 0.00014502984088132293,
534
+ "loss": 1.7921,
535
+ "step": 375
536
+ },
537
+ {
538
+ "epoch": 0.3448275862068966,
539
+ "grad_norm": 0.5703125,
540
+ "learning_rate": 0.00014494401575626225,
541
+ "loss": 1.8203,
542
+ "step": 380
543
+ },
544
+ {
545
+ "epoch": 0.34936479128856623,
546
+ "grad_norm": 0.578125,
547
+ "learning_rate": 0.00014485698042170404,
548
+ "loss": 1.7927,
549
+ "step": 385
550
+ },
551
+ {
552
+ "epoch": 0.35390199637023595,
553
+ "grad_norm": 0.56640625,
554
+ "learning_rate": 0.00014476873688476876,
555
+ "loss": 1.8242,
556
+ "step": 390
557
+ },
558
+ {
559
+ "epoch": 0.3584392014519056,
560
+ "grad_norm": 0.57421875,
561
+ "learning_rate": 0.0001446792871804392,
562
+ "loss": 1.8131,
563
+ "step": 395
564
+ },
565
+ {
566
+ "epoch": 0.3629764065335753,
567
+ "grad_norm": 0.546875,
568
+ "learning_rate": 0.00014458863337151349,
569
+ "loss": 1.8587,
570
+ "step": 400
571
+ },
572
+ {
573
+ "epoch": 0.367513611615245,
574
+ "grad_norm": 0.6640625,
575
+ "learning_rate": 0.0001444967775485577,
576
+ "loss": 1.7851,
577
+ "step": 405
578
+ },
579
+ {
580
+ "epoch": 0.3720508166969147,
581
+ "grad_norm": 0.55078125,
582
+ "learning_rate": 0.00014440372182985745,
583
+ "loss": 1.8071,
584
+ "step": 410
585
+ },
586
+ {
587
+ "epoch": 0.3765880217785844,
588
+ "grad_norm": 0.5703125,
589
+ "learning_rate": 0.00014430946836136918,
590
+ "loss": 1.7995,
591
+ "step": 415
592
+ },
593
+ {
594
+ "epoch": 0.3811252268602541,
595
+ "grad_norm": 0.60546875,
596
+ "learning_rate": 0.0001442140193166706,
597
+ "loss": 1.7947,
598
+ "step": 420
599
+ },
600
+ {
601
+ "epoch": 0.3856624319419238,
602
+ "grad_norm": 0.5546875,
603
+ "learning_rate": 0.00014411737689691058,
604
+ "loss": 1.7898,
605
+ "step": 425
606
+ },
607
+ {
608
+ "epoch": 0.39019963702359345,
609
+ "grad_norm": 0.81640625,
610
+ "learning_rate": 0.0001440195433307584,
611
+ "loss": 1.8306,
612
+ "step": 430
613
+ },
614
+ {
615
+ "epoch": 0.39473684210526316,
616
+ "grad_norm": 0.578125,
617
+ "learning_rate": 0.00014392052087435238,
618
+ "loss": 1.7914,
619
+ "step": 435
620
+ },
621
+ {
622
+ "epoch": 0.3992740471869328,
623
+ "grad_norm": 0.6171875,
624
+ "learning_rate": 0.00014382031181124774,
625
+ "loss": 1.8376,
626
+ "step": 440
627
+ },
628
+ {
629
+ "epoch": 0.40381125226860254,
630
+ "grad_norm": 0.5625,
631
+ "learning_rate": 0.00014371891845236406,
632
+ "loss": 1.7989,
633
+ "step": 445
634
+ },
635
+ {
636
+ "epoch": 0.40834845735027225,
637
+ "grad_norm": 0.5625,
638
+ "learning_rate": 0.00014361634313593197,
639
+ "loss": 1.8062,
640
+ "step": 450
641
+ },
642
+ {
643
+ "epoch": 0.4128856624319419,
644
+ "grad_norm": 0.671875,
645
+ "learning_rate": 0.0001435125882274392,
646
+ "loss": 1.831,
647
+ "step": 455
648
+ },
649
+ {
650
+ "epoch": 0.41742286751361163,
651
+ "grad_norm": 0.65234375,
652
+ "learning_rate": 0.00014340765611957598,
653
+ "loss": 1.7798,
654
+ "step": 460
655
+ },
656
+ {
657
+ "epoch": 0.4219600725952813,
658
+ "grad_norm": 0.5703125,
659
+ "learning_rate": 0.00014330154923217997,
660
+ "loss": 1.8022,
661
+ "step": 465
662
+ },
663
+ {
664
+ "epoch": 0.426497277676951,
665
+ "grad_norm": 0.640625,
666
+ "learning_rate": 0.0001431942700121804,
667
+ "loss": 1.8054,
668
+ "step": 470
669
+ },
670
+ {
671
+ "epoch": 0.43103448275862066,
672
+ "grad_norm": 0.5234375,
673
+ "learning_rate": 0.0001430858209335416,
674
+ "loss": 1.8397,
675
+ "step": 475
676
+ },
677
+ {
678
+ "epoch": 0.4355716878402904,
679
+ "grad_norm": 0.57421875,
680
+ "learning_rate": 0.00014297620449720602,
681
+ "loss": 1.8211,
682
+ "step": 480
683
+ },
684
+ {
685
+ "epoch": 0.4401088929219601,
686
+ "grad_norm": 0.65234375,
687
+ "learning_rate": 0.00014286542323103656,
688
+ "loss": 1.8092,
689
+ "step": 485
690
+ },
691
+ {
692
+ "epoch": 0.44464609800362975,
693
+ "grad_norm": 0.62109375,
694
+ "learning_rate": 0.00014275347968975818,
695
+ "loss": 1.8197,
696
+ "step": 490
697
+ },
698
+ {
699
+ "epoch": 0.44918330308529947,
700
+ "grad_norm": 0.56640625,
701
+ "learning_rate": 0.00014264037645489905,
702
+ "loss": 1.7599,
703
+ "step": 495
704
+ },
705
+ {
706
+ "epoch": 0.4537205081669691,
707
+ "grad_norm": 0.6015625,
708
+ "learning_rate": 0.0001425261161347311,
709
+ "loss": 1.785,
710
+ "step": 500
711
+ },
712
+ {
713
+ "epoch": 0.4537205081669691,
714
+ "eval_loss": 1.744194507598877,
715
+ "eval_runtime": 4.2905,
716
+ "eval_samples_per_second": 14.917,
717
+ "eval_steps_per_second": 14.917,
718
+ "step": 500
719
+ },
720
+ {
721
+ "epoch": 0.45825771324863884,
722
+ "grad_norm": 0.57421875,
723
+ "learning_rate": 0.00014241070136420967,
724
+ "loss": 1.7713,
725
+ "step": 505
726
+ },
727
+ {
728
+ "epoch": 0.4627949183303085,
729
+ "grad_norm": 0.57421875,
730
+ "learning_rate": 0.00014229413480491295,
731
+ "loss": 1.77,
732
+ "step": 510
733
+ },
734
+ {
735
+ "epoch": 0.4673321234119782,
736
+ "grad_norm": 0.59375,
737
+ "learning_rate": 0.00014217641914498046,
738
+ "loss": 1.8009,
739
+ "step": 515
740
+ },
741
+ {
742
+ "epoch": 0.47186932849364793,
743
+ "grad_norm": 0.671875,
744
+ "learning_rate": 0.00014205755709905117,
745
+ "loss": 1.8003,
746
+ "step": 520
747
+ },
748
+ {
749
+ "epoch": 0.4764065335753176,
750
+ "grad_norm": 0.78125,
751
+ "learning_rate": 0.00014193755140820072,
752
+ "loss": 1.8183,
753
+ "step": 525
754
+ },
755
+ {
756
+ "epoch": 0.4809437386569873,
757
+ "grad_norm": 0.55859375,
758
+ "learning_rate": 0.00014181640483987848,
759
+ "loss": 1.8192,
760
+ "step": 530
761
+ },
762
+ {
763
+ "epoch": 0.48548094373865697,
764
+ "grad_norm": 0.62890625,
765
+ "learning_rate": 0.00014169412018784347,
766
+ "loss": 1.7721,
767
+ "step": 535
768
+ },
769
+ {
770
+ "epoch": 0.4900181488203267,
771
+ "grad_norm": 0.66015625,
772
+ "learning_rate": 0.0001415707002721001,
773
+ "loss": 1.8054,
774
+ "step": 540
775
+ },
776
+ {
777
+ "epoch": 0.4945553539019964,
778
+ "grad_norm": 0.6328125,
779
+ "learning_rate": 0.000141446147938833,
780
+ "loss": 1.7558,
781
+ "step": 545
782
+ },
783
+ {
784
+ "epoch": 0.49909255898366606,
785
+ "grad_norm": 0.55078125,
786
+ "learning_rate": 0.00014132046606034153,
787
+ "loss": 1.8106,
788
+ "step": 550
789
+ },
790
+ {
791
+ "epoch": 0.5036297640653358,
792
+ "grad_norm": 0.5625,
793
+ "learning_rate": 0.0001411936575349735,
794
+ "loss": 1.8088,
795
+ "step": 555
796
+ },
797
+ {
798
+ "epoch": 0.5081669691470054,
799
+ "grad_norm": 0.54296875,
800
+ "learning_rate": 0.00014106572528705823,
801
+ "loss": 1.8421,
802
+ "step": 560
803
+ },
804
+ {
805
+ "epoch": 0.5127041742286751,
806
+ "grad_norm": 0.52734375,
807
+ "learning_rate": 0.0001409366722668392,
808
+ "loss": 1.8159,
809
+ "step": 565
810
+ },
811
+ {
812
+ "epoch": 0.5172413793103449,
813
+ "grad_norm": 0.53515625,
814
+ "learning_rate": 0.00014080650145040604,
815
+ "loss": 1.7016,
816
+ "step": 570
817
+ },
818
+ {
819
+ "epoch": 0.5217785843920145,
820
+ "grad_norm": 0.55078125,
821
+ "learning_rate": 0.00014067521583962587,
822
+ "loss": 1.7653,
823
+ "step": 575
824
+ },
825
+ {
826
+ "epoch": 0.5263157894736842,
827
+ "grad_norm": 0.53515625,
828
+ "learning_rate": 0.000140542818462074,
829
+ "loss": 1.7371,
830
+ "step": 580
831
+ },
832
+ {
833
+ "epoch": 0.530852994555354,
834
+ "grad_norm": 0.5078125,
835
+ "learning_rate": 0.00014040931237096425,
836
+ "loss": 1.7618,
837
+ "step": 585
838
+ },
839
+ {
840
+ "epoch": 0.5353901996370236,
841
+ "grad_norm": 0.56640625,
842
+ "learning_rate": 0.00014027470064507838,
843
+ "loss": 1.8225,
844
+ "step": 590
845
+ },
846
+ {
847
+ "epoch": 0.5399274047186933,
848
+ "grad_norm": 0.56640625,
849
+ "learning_rate": 0.00014013898638869527,
850
+ "loss": 1.8137,
851
+ "step": 595
852
+ },
853
+ {
854
+ "epoch": 0.5444646098003629,
855
+ "grad_norm": 0.6875,
856
+ "learning_rate": 0.00014000217273151913,
857
+ "loss": 1.75,
858
+ "step": 600
859
+ },
860
+ {
861
+ "epoch": 0.5490018148820327,
862
+ "grad_norm": 0.55078125,
863
+ "learning_rate": 0.00013986426282860755,
864
+ "loss": 1.7745,
865
+ "step": 605
866
+ },
867
+ {
868
+ "epoch": 0.5535390199637024,
869
+ "grad_norm": 0.5625,
870
+ "learning_rate": 0.00013972525986029856,
871
+ "loss": 1.7694,
872
+ "step": 610
873
+ },
874
+ {
875
+ "epoch": 0.558076225045372,
876
+ "grad_norm": 0.55078125,
877
+ "learning_rate": 0.00013958516703213735,
878
+ "loss": 1.75,
879
+ "step": 615
880
+ },
881
+ {
882
+ "epoch": 0.5626134301270418,
883
+ "grad_norm": 0.5546875,
884
+ "learning_rate": 0.00013944398757480237,
885
+ "loss": 1.7743,
886
+ "step": 620
887
+ },
888
+ {
889
+ "epoch": 0.5671506352087115,
890
+ "grad_norm": 0.49609375,
891
+ "learning_rate": 0.00013930172474403083,
892
+ "loss": 1.7893,
893
+ "step": 625
894
+ },
895
+ {
896
+ "epoch": 0.5716878402903811,
897
+ "grad_norm": 0.52734375,
898
+ "learning_rate": 0.00013915838182054354,
899
+ "loss": 1.7614,
900
+ "step": 630
901
+ },
902
+ {
903
+ "epoch": 0.5762250453720508,
904
+ "grad_norm": 0.60546875,
905
+ "learning_rate": 0.0001390139621099693,
906
+ "loss": 1.7126,
907
+ "step": 635
908
+ },
909
+ {
910
+ "epoch": 0.5807622504537205,
911
+ "grad_norm": 0.58203125,
912
+ "learning_rate": 0.0001388684689427688,
913
+ "loss": 1.7882,
914
+ "step": 640
915
+ },
916
+ {
917
+ "epoch": 0.5852994555353902,
918
+ "grad_norm": 0.55859375,
919
+ "learning_rate": 0.0001387219056741576,
920
+ "loss": 1.7766,
921
+ "step": 645
922
+ },
923
+ {
924
+ "epoch": 0.5898366606170599,
925
+ "grad_norm": 0.52734375,
926
+ "learning_rate": 0.0001385742756840288,
927
+ "loss": 1.7522,
928
+ "step": 650
929
+ },
930
+ {
931
+ "epoch": 0.5943738656987296,
932
+ "grad_norm": 0.5546875,
933
+ "learning_rate": 0.0001384255823768753,
934
+ "loss": 1.8185,
935
+ "step": 655
936
+ },
937
+ {
938
+ "epoch": 0.5989110707803993,
939
+ "grad_norm": 0.6640625,
940
+ "learning_rate": 0.00013827582918171102,
941
+ "loss": 1.786,
942
+ "step": 660
943
+ },
944
+ {
945
+ "epoch": 0.603448275862069,
946
+ "grad_norm": 0.59765625,
947
+ "learning_rate": 0.00013812501955199195,
948
+ "loss": 1.8142,
949
+ "step": 665
950
+ },
951
+ {
952
+ "epoch": 0.6079854809437386,
953
+ "grad_norm": 0.6015625,
954
+ "learning_rate": 0.00013797315696553652,
955
+ "loss": 1.8174,
956
+ "step": 670
957
+ },
958
+ {
959
+ "epoch": 0.6125226860254084,
960
+ "grad_norm": 0.53125,
961
+ "learning_rate": 0.00013782024492444542,
962
+ "loss": 1.7349,
963
+ "step": 675
964
+ },
965
+ {
966
+ "epoch": 0.617059891107078,
967
+ "grad_norm": 0.58203125,
968
+ "learning_rate": 0.0001376662869550207,
969
+ "loss": 1.7705,
970
+ "step": 680
971
+ },
972
+ {
973
+ "epoch": 0.6215970961887477,
974
+ "grad_norm": 0.5703125,
975
+ "learning_rate": 0.00013751128660768468,
976
+ "loss": 1.7593,
977
+ "step": 685
978
+ },
979
+ {
980
+ "epoch": 0.6261343012704175,
981
+ "grad_norm": 0.609375,
982
+ "learning_rate": 0.0001373552474568978,
983
+ "loss": 1.8741,
984
+ "step": 690
985
+ },
986
+ {
987
+ "epoch": 0.6306715063520871,
988
+ "grad_norm": 0.56640625,
989
+ "learning_rate": 0.0001371981731010764,
990
+ "loss": 1.7808,
991
+ "step": 695
992
+ },
993
+ {
994
+ "epoch": 0.6352087114337568,
995
+ "grad_norm": 0.5703125,
996
+ "learning_rate": 0.0001370400671625097,
997
+ "loss": 1.712,
998
+ "step": 700
999
+ },
1000
+ {
1001
+ "epoch": 0.6397459165154264,
1002
+ "grad_norm": 1.2578125,
1003
+ "learning_rate": 0.00013688093328727613,
1004
+ "loss": 1.7588,
1005
+ "step": 705
1006
+ },
1007
+ {
1008
+ "epoch": 0.6442831215970962,
1009
+ "grad_norm": 0.64453125,
1010
+ "learning_rate": 0.00013672077514515946,
1011
+ "loss": 1.7494,
1012
+ "step": 710
1013
+ },
1014
+ {
1015
+ "epoch": 0.6488203266787659,
1016
+ "grad_norm": 0.59375,
1017
+ "learning_rate": 0.00013655959642956399,
1018
+ "loss": 1.7665,
1019
+ "step": 715
1020
+ },
1021
+ {
1022
+ "epoch": 0.6533575317604355,
1023
+ "grad_norm": 0.54296875,
1024
+ "learning_rate": 0.00013639740085742951,
1025
+ "loss": 1.7562,
1026
+ "step": 720
1027
+ },
1028
+ {
1029
+ "epoch": 0.6578947368421053,
1030
+ "grad_norm": 0.55078125,
1031
+ "learning_rate": 0.00013623419216914545,
1032
+ "loss": 1.7741,
1033
+ "step": 725
1034
+ },
1035
+ {
1036
+ "epoch": 0.662431941923775,
1037
+ "grad_norm": 0.51953125,
1038
+ "learning_rate": 0.00013606997412846474,
1039
+ "loss": 1.7657,
1040
+ "step": 730
1041
+ },
1042
+ {
1043
+ "epoch": 0.6669691470054446,
1044
+ "grad_norm": 0.53515625,
1045
+ "learning_rate": 0.00013590475052241695,
1046
+ "loss": 1.714,
1047
+ "step": 735
1048
+ },
1049
+ {
1050
+ "epoch": 0.6715063520871143,
1051
+ "grad_norm": 0.546875,
1052
+ "learning_rate": 0.00013573852516122104,
1053
+ "loss": 1.7763,
1054
+ "step": 740
1055
+ },
1056
+ {
1057
+ "epoch": 0.6760435571687841,
1058
+ "grad_norm": 0.57421875,
1059
+ "learning_rate": 0.0001355713018781973,
1060
+ "loss": 1.7801,
1061
+ "step": 745
1062
+ },
1063
+ {
1064
+ "epoch": 0.6805807622504537,
1065
+ "grad_norm": 0.54296875,
1066
+ "learning_rate": 0.00013540308452967919,
1067
+ "loss": 1.7655,
1068
+ "step": 750
1069
+ },
1070
+ {
1071
+ "epoch": 0.6851179673321234,
1072
+ "grad_norm": 0.62109375,
1073
+ "learning_rate": 0.00013523387699492426,
1074
+ "loss": 1.7433,
1075
+ "step": 755
1076
+ },
1077
+ {
1078
+ "epoch": 0.6896551724137931,
1079
+ "grad_norm": 0.53515625,
1080
+ "learning_rate": 0.00013506368317602475,
1081
+ "loss": 1.8206,
1082
+ "step": 760
1083
+ },
1084
+ {
1085
+ "epoch": 0.6941923774954628,
1086
+ "grad_norm": 0.5390625,
1087
+ "learning_rate": 0.00013489250699781752,
1088
+ "loss": 1.7338,
1089
+ "step": 765
1090
+ },
1091
+ {
1092
+ "epoch": 0.6987295825771325,
1093
+ "grad_norm": 0.53125,
1094
+ "learning_rate": 0.0001347203524077937,
1095
+ "loss": 1.7672,
1096
+ "step": 770
1097
+ },
1098
+ {
1099
+ "epoch": 0.7032667876588021,
1100
+ "grad_norm": 0.578125,
1101
+ "learning_rate": 0.0001345472233760075,
1102
+ "loss": 1.7835,
1103
+ "step": 775
1104
+ },
1105
+ {
1106
+ "epoch": 0.7078039927404719,
1107
+ "grad_norm": 0.59765625,
1108
+ "learning_rate": 0.0001343731238949847,
1109
+ "loss": 1.7657,
1110
+ "step": 780
1111
+ },
1112
+ {
1113
+ "epoch": 0.7123411978221416,
1114
+ "grad_norm": 0.60546875,
1115
+ "learning_rate": 0.0001341980579796306,
1116
+ "loss": 1.7414,
1117
+ "step": 785
1118
+ },
1119
+ {
1120
+ "epoch": 0.7168784029038112,
1121
+ "grad_norm": 0.53125,
1122
+ "learning_rate": 0.00013402202966713748,
1123
+ "loss": 1.7147,
1124
+ "step": 790
1125
+ },
1126
+ {
1127
+ "epoch": 0.721415607985481,
1128
+ "grad_norm": 0.498046875,
1129
+ "learning_rate": 0.0001338450430168914,
1130
+ "loss": 1.7195,
1131
+ "step": 795
1132
+ },
1133
+ {
1134
+ "epoch": 0.7259528130671506,
1135
+ "grad_norm": 0.62109375,
1136
+ "learning_rate": 0.0001336671021103786,
1137
+ "loss": 1.7685,
1138
+ "step": 800
1139
+ },
1140
+ {
1141
+ "epoch": 0.7304900181488203,
1142
+ "grad_norm": 0.63671875,
1143
+ "learning_rate": 0.0001334882110510915,
1144
+ "loss": 1.7556,
1145
+ "step": 805
1146
+ },
1147
+ {
1148
+ "epoch": 0.73502722323049,
1149
+ "grad_norm": 0.5625,
1150
+ "learning_rate": 0.00013330837396443391,
1151
+ "loss": 1.7322,
1152
+ "step": 810
1153
+ },
1154
+ {
1155
+ "epoch": 0.7395644283121597,
1156
+ "grad_norm": 0.5703125,
1157
+ "learning_rate": 0.00013312759499762596,
1158
+ "loss": 1.7304,
1159
+ "step": 815
1160
+ },
1161
+ {
1162
+ "epoch": 0.7441016333938294,
1163
+ "grad_norm": 0.5390625,
1164
+ "learning_rate": 0.0001329458783196085,
1165
+ "loss": 1.7372,
1166
+ "step": 820
1167
+ },
1168
+ {
1169
+ "epoch": 0.7486388384754991,
1170
+ "grad_norm": 0.56640625,
1171
+ "learning_rate": 0.00013276322812094687,
1172
+ "loss": 1.7411,
1173
+ "step": 825
1174
+ },
1175
+ {
1176
+ "epoch": 0.7531760435571688,
1177
+ "grad_norm": 0.54296875,
1178
+ "learning_rate": 0.00013257964861373438,
1179
+ "loss": 1.791,
1180
+ "step": 830
1181
+ },
1182
+ {
1183
+ "epoch": 0.7577132486388385,
1184
+ "grad_norm": 0.64453125,
1185
+ "learning_rate": 0.0001323951440314951,
1186
+ "loss": 1.7303,
1187
+ "step": 835
1188
+ },
1189
+ {
1190
+ "epoch": 0.7622504537205081,
1191
+ "grad_norm": 0.5390625,
1192
+ "learning_rate": 0.00013220971862908614,
1193
+ "loss": 1.7399,
1194
+ "step": 840
1195
+ },
1196
+ {
1197
+ "epoch": 0.7667876588021778,
1198
+ "grad_norm": 0.58203125,
1199
+ "learning_rate": 0.00013202337668259976,
1200
+ "loss": 1.7357,
1201
+ "step": 845
1202
+ },
1203
+ {
1204
+ "epoch": 0.7713248638838476,
1205
+ "grad_norm": 0.5234375,
1206
+ "learning_rate": 0.00013183612248926458,
1207
+ "loss": 1.7571,
1208
+ "step": 850
1209
+ },
1210
+ {
1211
+ "epoch": 0.7758620689655172,
1212
+ "grad_norm": 0.515625,
1213
+ "learning_rate": 0.00013164796036734647,
1214
+ "loss": 1.7881,
1215
+ "step": 855
1216
+ },
1217
+ {
1218
+ "epoch": 0.7803992740471869,
1219
+ "grad_norm": 0.63671875,
1220
+ "learning_rate": 0.00013145889465604913,
1221
+ "loss": 1.7136,
1222
+ "step": 860
1223
+ },
1224
+ {
1225
+ "epoch": 0.7849364791288567,
1226
+ "grad_norm": 0.5625,
1227
+ "learning_rate": 0.00013126892971541387,
1228
+ "loss": 1.7307,
1229
+ "step": 865
1230
+ },
1231
+ {
1232
+ "epoch": 0.7894736842105263,
1233
+ "grad_norm": 0.6640625,
1234
+ "learning_rate": 0.0001310780699262191,
1235
+ "loss": 1.7888,
1236
+ "step": 870
1237
+ },
1238
+ {
1239
+ "epoch": 0.794010889292196,
1240
+ "grad_norm": 0.5390625,
1241
+ "learning_rate": 0.00013088631968987934,
1242
+ "loss": 1.7088,
1243
+ "step": 875
1244
+ },
1245
+ {
1246
+ "epoch": 0.7985480943738656,
1247
+ "grad_norm": 0.51953125,
1248
+ "learning_rate": 0.00013069368342834368,
1249
+ "loss": 1.796,
1250
+ "step": 880
1251
+ },
1252
+ {
1253
+ "epoch": 0.8030852994555354,
1254
+ "grad_norm": 0.57421875,
1255
+ "learning_rate": 0.00013050016558399384,
1256
+ "loss": 1.708,
1257
+ "step": 885
1258
+ },
1259
+ {
1260
+ "epoch": 0.8076225045372051,
1261
+ "grad_norm": 0.515625,
1262
+ "learning_rate": 0.00013030577061954167,
1263
+ "loss": 1.7503,
1264
+ "step": 890
1265
+ },
1266
+ {
1267
+ "epoch": 0.8121597096188747,
1268
+ "grad_norm": 0.64453125,
1269
+ "learning_rate": 0.00013011050301792632,
1270
+ "loss": 1.7318,
1271
+ "step": 895
1272
+ },
1273
+ {
1274
+ "epoch": 0.8166969147005445,
1275
+ "grad_norm": 0.53515625,
1276
+ "learning_rate": 0.00012991436728221082,
1277
+ "loss": 1.7336,
1278
+ "step": 900
1279
+ },
1280
+ {
1281
+ "epoch": 0.8212341197822142,
1282
+ "grad_norm": 0.5078125,
1283
+ "learning_rate": 0.00012971736793547815,
1284
+ "loss": 1.7983,
1285
+ "step": 905
1286
+ },
1287
+ {
1288
+ "epoch": 0.8257713248638838,
1289
+ "grad_norm": 0.52734375,
1290
+ "learning_rate": 0.00012951950952072713,
1291
+ "loss": 1.7139,
1292
+ "step": 910
1293
+ },
1294
+ {
1295
+ "epoch": 0.8303085299455535,
1296
+ "grad_norm": 0.5234375,
1297
+ "learning_rate": 0.00012932079660076738,
1298
+ "loss": 1.7785,
1299
+ "step": 915
1300
+ },
1301
+ {
1302
+ "epoch": 0.8348457350272233,
1303
+ "grad_norm": 0.58203125,
1304
+ "learning_rate": 0.0001291212337581144,
1305
+ "loss": 1.7315,
1306
+ "step": 920
1307
+ },
1308
+ {
1309
+ "epoch": 0.8393829401088929,
1310
+ "grad_norm": 0.51171875,
1311
+ "learning_rate": 0.00012892082559488361,
1312
+ "loss": 1.7943,
1313
+ "step": 925
1314
+ },
1315
+ {
1316
+ "epoch": 0.8439201451905626,
1317
+ "grad_norm": 0.59765625,
1318
+ "learning_rate": 0.0001287195767326845,
1319
+ "loss": 1.7686,
1320
+ "step": 930
1321
+ },
1322
+ {
1323
+ "epoch": 0.8484573502722323,
1324
+ "grad_norm": 0.50390625,
1325
+ "learning_rate": 0.00012851749181251373,
1326
+ "loss": 1.7367,
1327
+ "step": 935
1328
+ },
1329
+ {
1330
+ "epoch": 0.852994555353902,
1331
+ "grad_norm": 0.57421875,
1332
+ "learning_rate": 0.00012831457549464847,
1333
+ "loss": 1.7854,
1334
+ "step": 940
1335
+ },
1336
+ {
1337
+ "epoch": 0.8575317604355717,
1338
+ "grad_norm": 0.578125,
1339
+ "learning_rate": 0.0001281108324585386,
1340
+ "loss": 1.76,
1341
+ "step": 945
1342
+ },
1343
+ {
1344
+ "epoch": 0.8620689655172413,
1345
+ "grad_norm": 0.5078125,
1346
+ "learning_rate": 0.00012790626740269897,
1347
+ "loss": 1.6915,
1348
+ "step": 950
1349
+ },
1350
+ {
1351
+ "epoch": 0.8666061705989111,
1352
+ "grad_norm": 0.51953125,
1353
+ "learning_rate": 0.00012770088504460107,
1354
+ "loss": 1.7474,
1355
+ "step": 955
1356
+ },
1357
+ {
1358
+ "epoch": 0.8711433756805808,
1359
+ "grad_norm": 0.55859375,
1360
+ "learning_rate": 0.00012749469012056407,
1361
+ "loss": 1.7116,
1362
+ "step": 960
1363
+ },
1364
+ {
1365
+ "epoch": 0.8756805807622504,
1366
+ "grad_norm": 0.50390625,
1367
+ "learning_rate": 0.00012728768738564584,
1368
+ "loss": 1.691,
1369
+ "step": 965
1370
+ },
1371
+ {
1372
+ "epoch": 0.8802177858439202,
1373
+ "grad_norm": 0.52734375,
1374
+ "learning_rate": 0.00012707988161353307,
1375
+ "loss": 1.7683,
1376
+ "step": 970
1377
+ },
1378
+ {
1379
+ "epoch": 0.8847549909255898,
1380
+ "grad_norm": 0.546875,
1381
+ "learning_rate": 0.00012687127759643133,
1382
+ "loss": 1.7294,
1383
+ "step": 975
1384
+ },
1385
+ {
1386
+ "epoch": 0.8892921960072595,
1387
+ "grad_norm": 0.5546875,
1388
+ "learning_rate": 0.00012666188014495442,
1389
+ "loss": 1.7616,
1390
+ "step": 980
1391
+ },
1392
+ {
1393
+ "epoch": 0.8938294010889292,
1394
+ "grad_norm": 0.51953125,
1395
+ "learning_rate": 0.0001264516940880137,
1396
+ "loss": 1.7111,
1397
+ "step": 985
1398
+ },
1399
+ {
1400
+ "epoch": 0.8983666061705989,
1401
+ "grad_norm": 0.80859375,
1402
+ "learning_rate": 0.00012624072427270633,
1403
+ "loss": 1.6731,
1404
+ "step": 990
1405
+ },
1406
+ {
1407
+ "epoch": 0.9029038112522686,
1408
+ "grad_norm": 0.57421875,
1409
+ "learning_rate": 0.00012602897556420386,
1410
+ "loss": 1.7093,
1411
+ "step": 995
1412
+ },
1413
+ {
1414
+ "epoch": 0.9074410163339383,
1415
+ "grad_norm": 0.51953125,
1416
+ "learning_rate": 0.00012581645284563981,
1417
+ "loss": 1.7539,
1418
+ "step": 1000
1419
+ },
1420
+ {
1421
+ "epoch": 0.9074410163339383,
1422
+ "eval_loss": 1.6703623533248901,
1423
+ "eval_runtime": 4.0596,
1424
+ "eval_samples_per_second": 15.765,
1425
+ "eval_steps_per_second": 15.765,
1426
+ "step": 1000
1427
+ },
1428
+ {
1429
+ "epoch": 0.911978221415608,
1430
+ "grad_norm": 0.515625,
1431
+ "learning_rate": 0.00012560316101799718,
1432
+ "loss": 1.7018,
1433
+ "step": 1005
1434
+ },
1435
+ {
1436
+ "epoch": 0.9165154264972777,
1437
+ "grad_norm": 0.5546875,
1438
+ "learning_rate": 0.00012538910499999527,
1439
+ "loss": 1.7085,
1440
+ "step": 1010
1441
+ },
1442
+ {
1443
+ "epoch": 0.9210526315789473,
1444
+ "grad_norm": 0.5703125,
1445
+ "learning_rate": 0.0001251742897279766,
1446
+ "loss": 1.7526,
1447
+ "step": 1015
1448
+ },
1449
+ {
1450
+ "epoch": 0.925589836660617,
1451
+ "grad_norm": 0.54296875,
1452
+ "learning_rate": 0.00012495872015579255,
1453
+ "loss": 1.7121,
1454
+ "step": 1020
1455
+ },
1456
+ {
1457
+ "epoch": 0.9283121597096189,
1458
+ "eval_loss": 1.6611651182174683,
1459
+ "eval_runtime": 4.1505,
1460
+ "eval_samples_per_second": 15.42,
1461
+ "eval_steps_per_second": 15.42,
1462
+ "step": 1023
1463
+ },
1464
+ {
1465
+ "epoch": 0.9301270417422868,
1466
+ "grad_norm": 0.5703125,
1467
+ "learning_rate": 0.0001247424012546896,
1468
+ "loss": 1.7569,
1469
+ "step": 1025
1470
+ },
1471
+ {
1472
+ "epoch": 0.9346642468239564,
1473
+ "grad_norm": 0.515625,
1474
+ "learning_rate": 0.0001245253380131944,
1475
+ "loss": 1.7379,
1476
+ "step": 1030
1477
+ },
1478
+ {
1479
+ "epoch": 0.9392014519056261,
1480
+ "grad_norm": 1.59375,
1481
+ "learning_rate": 0.0001243075354369989,
1482
+ "loss": 1.7344,
1483
+ "step": 1035
1484
+ },
1485
+ {
1486
+ "epoch": 0.9437386569872959,
1487
+ "grad_norm": 0.65625,
1488
+ "learning_rate": 0.00012408899854884475,
1489
+ "loss": 1.7376,
1490
+ "step": 1040
1491
+ },
1492
+ {
1493
+ "epoch": 0.9482758620689655,
1494
+ "grad_norm": 0.54296875,
1495
+ "learning_rate": 0.00012386973238840757,
1496
+ "loss": 1.7244,
1497
+ "step": 1045
1498
+ },
1499
+ {
1500
+ "epoch": 0.9528130671506352,
1501
+ "grad_norm": 0.5234375,
1502
+ "learning_rate": 0.0001236497420121808,
1503
+ "loss": 1.7126,
1504
+ "step": 1050
1505
+ },
1506
+ {
1507
+ "epoch": 0.957350272232305,
1508
+ "grad_norm": 0.494140625,
1509
+ "learning_rate": 0.0001234290324933589,
1510
+ "loss": 1.7429,
1511
+ "step": 1055
1512
+ },
1513
+ {
1514
+ "epoch": 0.9618874773139746,
1515
+ "grad_norm": 0.58203125,
1516
+ "learning_rate": 0.00012320760892172057,
1517
+ "loss": 1.7277,
1518
+ "step": 1060
1519
+ },
1520
+ {
1521
+ "epoch": 0.9664246823956443,
1522
+ "grad_norm": 0.52734375,
1523
+ "learning_rate": 0.00012298547640351114,
1524
+ "loss": 1.7246,
1525
+ "step": 1065
1526
+ },
1527
+ {
1528
+ "epoch": 0.9709618874773139,
1529
+ "grad_norm": 0.546875,
1530
+ "learning_rate": 0.00012276264006132504,
1531
+ "loss": 1.6859,
1532
+ "step": 1070
1533
+ },
1534
+ {
1535
+ "epoch": 0.9754990925589837,
1536
+ "grad_norm": 0.53515625,
1537
+ "learning_rate": 0.0001225391050339876,
1538
+ "loss": 1.707,
1539
+ "step": 1075
1540
+ },
1541
+ {
1542
+ "epoch": 0.9800362976406534,
1543
+ "grad_norm": 0.515625,
1544
+ "learning_rate": 0.00012231487647643643,
1545
+ "loss": 1.7146,
1546
+ "step": 1080
1547
+ },
1548
+ {
1549
+ "epoch": 0.984573502722323,
1550
+ "grad_norm": 0.5078125,
1551
+ "learning_rate": 0.00012208995955960268,
1552
+ "loss": 1.7443,
1553
+ "step": 1085
1554
+ },
1555
+ {
1556
+ "epoch": 0.9891107078039928,
1557
+ "grad_norm": 0.55078125,
1558
+ "learning_rate": 0.00012186435947029172,
1559
+ "loss": 1.71,
1560
+ "step": 1090
1561
+ },
1562
+ {
1563
+ "epoch": 0.9936479128856625,
1564
+ "grad_norm": 0.609375,
1565
+ "learning_rate": 0.00012163808141106358,
1566
+ "loss": 1.7209,
1567
+ "step": 1095
1568
+ },
1569
+ {
1570
+ "epoch": 0.9981851179673321,
1571
+ "grad_norm": 0.52734375,
1572
+ "learning_rate": 0.0001214111306001129,
1573
+ "loss": 1.769,
1574
+ "step": 1100
1575
+ },
1576
+ {
1577
+ "epoch": 0.9990925589836661,
1578
+ "eval_loss": 1.6452035903930664,
1579
+ "eval_runtime": 4.1352,
1580
+ "eval_samples_per_second": 15.477,
1581
+ "eval_steps_per_second": 15.477,
1582
+ "step": 1101
1583
+ }
1584
+ ],
1585
+ "logging_steps": 5,
1586
+ "max_steps": 3306,
1587
+ "num_input_tokens_seen": 0,
1588
+ "num_train_epochs": 3,
1589
+ "save_steps": 500,
1590
+ "stateful_callbacks": {
1591
+ "TrainerControl": {
1592
+ "args": {
1593
+ "should_epoch_stop": false,
1594
+ "should_evaluate": false,
1595
+ "should_log": false,
1596
+ "should_save": true,
1597
+ "should_training_stop": false
1598
+ },
1599
+ "attributes": {}
1600
+ }
1601
+ },
1602
+ "total_flos": 2.4174287097755075e+18,
1603
+ "train_batch_size": 48,
1604
+ "trial_name": null,
1605
+ "trial_params": null
1606
+ }
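The lines above close the log_history array and the trainer-state trailer (logging_steps 5, max_steps 3306, 3 epochs, save_steps 500, train_batch_size 48). As a minimal sketch, assuming this file is the trainer_state.json that transformers.Trainer writes into each checkpoint directory, the logged train/eval losses can be read back out of log_history; the file path is an assumption, and the keys ("step", "loss", "eval_loss") are the ones visible in the diff above.

# Sketch: summarize the training log above, assuming it was saved as
# trainer_state.json by transformers.Trainer (path is an assumption).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Entries with "loss" are training logs (every 5 steps here); entries with
# "eval_loss" are the periodic evaluations (steps 1000, 1023, 1101 above).
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

for step, loss in train[-3:]:
    print(f"step {step:>4}  train loss {loss:.4f}")
for step, loss in evals:
    print(f"step {step:>4}  eval loss  {loss:.4f}")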
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f3e2a6b8c0ffd529b7fa0111e83a77715efc7ac879aee268d9f58eaf00f9f68
3
+ size 5624
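training_args.bin is stored via Git LFS, so the diff only records the pointer (oid and size), not the contents. As a hedged sketch, assuming it is the pickled TrainingArguments object that transformers.Trainer saves alongside its checkpoints, it can be inspected with torch after pulling the LFS object; unpickling executes arbitrary code, so only do this for files you trust.

# Sketch: inspect training_args.bin, assuming it is a pickled
# transformers.TrainingArguments (the usual Trainer convention).
# weights_only=False is required because this is a full Python object,
# not a tensor state dict; transformers must be importable to unpickle it.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)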