{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.05044136191677175,
  "eval_steps": 100,
  "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0025220680958385876,
      "grad_norm": 119.76318359375,
      "kl_loss": -1.1687562835330993e-15,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0,
      "loss": 2.6394360065460205,
      "step": 1,
      "total_loss": 2.6394360065460205
    },
    {
      "epoch": 0.005044136191677175,
      "grad_norm": 116.01831817626953,
      "kl_loss": -1.280914393650412e-14,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0001,
      "loss": 3.2936160564422607,
      "step": 2,
      "total_loss": 3.2936160564422607
    },
    {
      "epoch": 0.007566204287515763,
      "grad_norm": 104.04817962646484,
      "kl_loss": 7.10318071028837e-09,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0002,
      "loss": 3.084439992904663,
      "step": 3,
      "total_loss": 3.091543197631836
    },
    {
      "epoch": 0.01008827238335435,
      "grad_norm": 68.36679077148438,
      "kl_loss": 2.8489626657801637e-08,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0003,
      "loss": 3.105210304260254,
      "step": 4,
      "total_loss": 3.133699893951416
    },
    {
      "epoch": 0.012610340479192938,
      "grad_norm": 61.00284957885742,
      "kl_loss": 4.923957774849441e-08,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004,
      "loss": 3.345022678375244,
      "step": 5,
      "total_loss": 3.3942623138427734
    },
    {
      "epoch": 0.015132408575031526,
      "grad_norm": 65.48960876464844,
      "kl_loss": 1.43211394743048e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0005,
      "loss": 2.3467514514923096,
      "step": 6,
      "total_loss": 2.4899628162384033
    },
    {
      "epoch": 0.017654476670870115,
      "grad_norm": 63.001102447509766,
      "kl_loss": 9.109995602329946e-08,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004993662864385298,
      "loss": 2.5077083110809326,
      "step": 7,
      "total_loss": 2.5988082885742188
    },
    {
      "epoch": 0.0201765447667087,
      "grad_norm": 58.6073112487793,
      "kl_loss": 2.3511624647198914e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004987325728770596,
      "loss": 2.2668278217315674,
      "step": 8,
      "total_loss": 2.501944065093994
    },
    {
      "epoch": 0.02269861286254729,
      "grad_norm": 97.743896484375,
      "kl_loss": 2.1175161180053692e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004980988593155894,
      "loss": 2.352029800415039,
      "step": 9,
      "total_loss": 2.563781499862671
    },
    {
      "epoch": 0.025220680958385876,
      "grad_norm": 60.91500473022461,
      "kl_loss": 1.2846226127294358e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004974651457541192,
      "loss": 2.2376697063446045,
      "step": 10,
      "total_loss": 2.3661320209503174
    },
    {
      "epoch": 0.027742749054224466,
      "grad_norm": 55.095516204833984,
      "kl_loss": 1.4181343033214944e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.000496831432192649,
      "loss": 2.8243818283081055,
      "step": 11,
      "total_loss": 2.9661953449249268
    },
    {
      "epoch": 0.03026481715006305,
      "grad_norm": 44.97727966308594,
      "kl_loss": 1.545683971926337e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004961977186311787,
      "loss": 2.4689197540283203,
      "step": 12,
      "total_loss": 2.623488187789917
    },
    {
      "epoch": 0.03278688524590164,
      "grad_norm": 51.62504196166992,
      "kl_loss": 2.2357993145760702e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004955640050697085,
      "loss": 2.2227847576141357,
      "step": 13,
      "total_loss": 2.446364641189575
    },
    {
      "epoch": 0.03530895334174023,
      "grad_norm": 42.21575927734375,
      "kl_loss": 1.6229765265052265e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004949302915082382,
      "loss": 2.4396450519561768,
      "step": 14,
      "total_loss": 2.601942777633667
    },
    {
      "epoch": 0.03783102143757881,
      "grad_norm": 40.02684783935547,
      "kl_loss": 1.4151250127270032e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004942965779467681,
      "loss": 2.509690761566162,
      "step": 15,
      "total_loss": 2.651203155517578
    },
    {
      "epoch": 0.0403530895334174,
      "grad_norm": 44.62814712524414,
      "kl_loss": 1.450005981951108e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004936628643852978,
      "loss": 2.4844541549682617,
      "step": 16,
      "total_loss": 2.6294548511505127
    },
    {
      "epoch": 0.04287515762925599,
      "grad_norm": 41.87761688232422,
      "kl_loss": 1.397227009647395e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004930291508238277,
      "loss": 2.7985713481903076,
      "step": 17,
      "total_loss": 2.938293933868408
    },
    {
      "epoch": 0.04539722572509458,
      "grad_norm": 39.647457122802734,
      "kl_loss": 1.0770181546604363e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004923954372623574,
      "loss": 2.1876273155212402,
      "step": 18,
      "total_loss": 2.2953290939331055
    },
    {
      "epoch": 0.04791929382093316,
      "grad_norm": 44.82719039916992,
      "kl_loss": 1.325549447983576e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004917617237008873,
      "loss": 2.344290256500244,
      "step": 19,
      "total_loss": 2.4768452644348145
    },
    {
      "epoch": 0.05044136191677175,
      "grad_norm": 35.45253372192383,
      "kl_loss": 1.3449634650442022e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004911280101394169,
      "loss": 2.393965244293213,
      "step": 20,
      "total_loss": 2.5284616947174072
    }
  ],
  "logging_steps": 1,
  "max_steps": 794,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6876561408000000.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}