{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.9980430528375734,
  "eval_steps": 50,
  "global_step": 1020,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.19569471624266144,
      "grad_norm": 0.8885060548782349,
      "learning_rate": 4.901960784313726e-06,
      "loss": 0.9897,
      "step": 50
    },
    {
      "epoch": 0.3913894324853229,
      "grad_norm": 0.45187801122665405,
      "learning_rate": 9.803921568627451e-06,
      "loss": 0.7524,
      "step": 100
    },
    {
      "epoch": 0.5870841487279843,
      "grad_norm": 0.43322402238845825,
      "learning_rate": 1.4705882352941177e-05,
      "loss": 0.6248,
      "step": 150
    },
    {
      "epoch": 0.7827788649706457,
      "grad_norm": 0.6817905902862549,
      "learning_rate": 1.9607843137254903e-05,
      "loss": 0.5634,
      "step": 200
    },
    {
      "epoch": 0.9784735812133072,
      "grad_norm": 0.6186803579330444,
      "learning_rate": 2.4509803921568626e-05,
      "loss": 0.5199,
      "step": 250
    },
    {
      "epoch": 0.9980430528375733,
      "eval_loss": 0.5416576862335205,
      "eval_runtime": 67.3935,
      "eval_samples_per_second": 3.368,
      "eval_steps_per_second": 0.43,
      "step": 255
    },
    {
      "epoch": 1.1741682974559686,
      "grad_norm": 0.8519768118858337,
      "learning_rate": 2.9411764705882354e-05,
      "loss": 0.5778,
      "step": 300
    },
    {
      "epoch": 1.36986301369863,
      "grad_norm": 0.6045963764190674,
      "learning_rate": 3.431372549019608e-05,
      "loss": 0.5183,
      "step": 350
    },
    {
      "epoch": 1.5655577299412915,
      "grad_norm": 0.6654757261276245,
      "learning_rate": 3.9215686274509805e-05,
      "loss": 0.5266,
      "step": 400
    },
    {
      "epoch": 1.7612524461839532,
      "grad_norm": 0.5622931718826294,
      "learning_rate": 4.411764705882353e-05,
      "loss": 0.5165,
      "step": 450
    },
    {
      "epoch": 1.9569471624266144,
      "grad_norm": 0.4925207495689392,
      "learning_rate": 4.901960784313725e-05,
      "loss": 0.5031,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.5099073648452759,
      "eval_runtime": 67.3595,
      "eval_samples_per_second": 3.37,
      "eval_steps_per_second": 0.431,
      "step": 511
    },
    {
      "epoch": 2.152641878669276,
      "grad_norm": 0.6266790628433228,
      "learning_rate": 4.995258321842611e-05,
      "loss": 0.5236,
      "step": 550
    },
    {
      "epoch": 2.3483365949119372,
      "grad_norm": 0.5779295563697815,
      "learning_rate": 4.976026077188013e-05,
      "loss": 0.4695,
      "step": 600
    },
    {
      "epoch": 2.544031311154599,
      "grad_norm": 0.7699418663978577,
      "learning_rate": 4.942120794399002e-05,
      "loss": 0.5105,
      "step": 650
    },
    {
      "epoch": 2.73972602739726,
      "grad_norm": 0.4785248339176178,
      "learning_rate": 4.893743397654811e-05,
      "loss": 0.5547,
      "step": 700
    },
    {
      "epoch": 2.935420743639922,
      "grad_norm": 0.534096360206604,
      "learning_rate": 4.8311805735108894e-05,
      "loss": 0.4766,
      "step": 750
    },
    {
      "epoch": 2.9980430528375734,
      "eval_loss": 0.4970957636833191,
      "eval_runtime": 67.3751,
      "eval_samples_per_second": 3.369,
      "eval_steps_per_second": 0.43,
      "step": 766
    },
    {
      "epoch": 3.136986301369863,
      "grad_norm": 0.557356595993042,
      "learning_rate": 4.754803071981916e-05,
      "loss": 0.4412,
      "step": 800
    },
    {
      "epoch": 3.3326810176125243,
      "grad_norm": 0.6019110083580017,
      "learning_rate": 4.665063509461097e-05,
      "loss": 0.4561,
      "step": 850
    },
    {
      "epoch": 3.528375733855186,
      "grad_norm": 0.6057672500610352,
      "learning_rate": 4.5624936864957556e-05,
      "loss": 0.4561,
      "step": 900
    },
    {
      "epoch": 3.724070450097847,
      "grad_norm": 0.4167369306087494,
      "learning_rate": 4.447701436314176e-05,
      "loss": 0.4621,
      "step": 950
    },
    {
      "epoch": 3.919765166340509,
      "grad_norm": 0.5263449549674988,
      "learning_rate": 4.321367022779476e-05,
      "loss": 0.5035,
      "step": 1000
    },
    {
      "epoch": 3.9980430528375734,
      "eval_loss": 0.495980829000473,
      "eval_runtime": 67.4502,
      "eval_samples_per_second": 3.365,
      "eval_steps_per_second": 0.43,
      "step": 1020
    }
  ],
  "logging_steps": 50,
  "max_steps": 2550,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 6.79057249258537e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}