{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.976,
"eval_steps": 500,
"global_step": 93,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.032,
"grad_norm": 6.017088211518867,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.8466,
"step": 1
},
{
"epoch": 0.064,
"grad_norm": 5.951857736161836,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.8716,
"step": 2
},
{
"epoch": 0.096,
"grad_norm": 5.7077662516941885,
"learning_rate": 3e-06,
"loss": 0.8566,
"step": 3
},
{
"epoch": 0.128,
"grad_norm": 5.833985690256031,
"learning_rate": 4.000000000000001e-06,
"loss": 0.8807,
"step": 4
},
{
"epoch": 0.16,
"grad_norm": 4.601869768823213,
"learning_rate": 5e-06,
"loss": 0.7739,
"step": 5
},
{
"epoch": 0.192,
"grad_norm": 2.656272883782473,
"learning_rate": 6e-06,
"loss": 0.8025,
"step": 6
},
{
"epoch": 0.224,
"grad_norm": 2.191913269298967,
"learning_rate": 7e-06,
"loss": 0.7636,
"step": 7
},
{
"epoch": 0.256,
"grad_norm": 4.059469817937578,
"learning_rate": 8.000000000000001e-06,
"loss": 0.7703,
"step": 8
},
{
"epoch": 0.288,
"grad_norm": 4.482509294996795,
"learning_rate": 9e-06,
"loss": 0.7323,
"step": 9
},
{
"epoch": 0.32,
"grad_norm": 5.2896734327529975,
"learning_rate": 1e-05,
"loss": 0.7473,
"step": 10
},
{
"epoch": 0.352,
"grad_norm": 5.648561404857537,
"learning_rate": 9.996418774081658e-06,
"loss": 0.7653,
"step": 11
},
{
"epoch": 0.384,
"grad_norm": 4.49874465390001,
"learning_rate": 9.985680226398261e-06,
"loss": 0.7378,
"step": 12
},
{
"epoch": 0.416,
"grad_norm": 2.5886351134391057,
"learning_rate": 9.967799739815925e-06,
"loss": 0.6586,
"step": 13
},
{
"epoch": 0.448,
"grad_norm": 2.170217772669203,
"learning_rate": 9.942802927959444e-06,
"loss": 0.7204,
"step": 14
},
{
"epoch": 0.48,
"grad_norm": 2.5221677834113967,
"learning_rate": 9.910725598521014e-06,
"loss": 0.6305,
"step": 15
},
{
"epoch": 0.512,
"grad_norm": 2.4169452505699134,
"learning_rate": 9.871613701966067e-06,
"loss": 0.6628,
"step": 16
},
{
"epoch": 0.544,
"grad_norm": 2.1361025544158485,
"learning_rate": 9.825523265709667e-06,
"loss": 0.6755,
"step": 17
},
{
"epoch": 0.576,
"grad_norm": 1.256643746613112,
"learning_rate": 9.772520313857777e-06,
"loss": 0.6011,
"step": 18
},
{
"epoch": 0.608,
"grad_norm": 1.5131065345982715,
"learning_rate": 9.712680772628365e-06,
"loss": 0.6595,
"step": 19
},
{
"epoch": 0.64,
"grad_norm": 1.5554495037101939,
"learning_rate": 9.646090361587828e-06,
"loss": 0.5806,
"step": 20
},
{
"epoch": 0.672,
"grad_norm": 1.479197692013354,
"learning_rate": 9.572844470858537e-06,
"loss": 0.6069,
"step": 21
},
{
"epoch": 0.704,
"grad_norm": 1.09952905385751,
"learning_rate": 9.493048024473413e-06,
"loss": 0.6094,
"step": 22
},
{
"epoch": 0.736,
"grad_norm": 1.0257031667969818,
"learning_rate": 9.406815330073244e-06,
"loss": 0.5873,
"step": 23
},
{
"epoch": 0.768,
"grad_norm": 1.3397890175148242,
"learning_rate": 9.314269915162115e-06,
"loss": 0.6475,
"step": 24
},
{
"epoch": 0.8,
"grad_norm": 1.1987888823222785,
"learning_rate": 9.215544350155423e-06,
"loss": 0.5892,
"step": 25
},
{
"epoch": 0.832,
"grad_norm": 0.987671863849975,
"learning_rate": 9.110780058474052e-06,
"loss": 0.6205,
"step": 26
},
{
"epoch": 0.864,
"grad_norm": 1.0538210475650935,
"learning_rate": 9.000127113956673e-06,
"loss": 0.635,
"step": 27
},
{
"epoch": 0.896,
"grad_norm": 0.9785904516879229,
"learning_rate": 8.883744025880429e-06,
"loss": 0.592,
"step": 28
},
{
"epoch": 0.928,
"grad_norm": 1.0498722982137005,
"learning_rate": 8.761797511897907e-06,
"loss": 0.6359,
"step": 29
},
{
"epoch": 0.96,
"grad_norm": 0.9942472305105629,
"learning_rate": 8.634462259215719e-06,
"loss": 0.614,
"step": 30
},
{
"epoch": 0.992,
"grad_norm": 1.047896159694581,
"learning_rate": 8.501920674356755e-06,
"loss": 0.5893,
"step": 31
},
{
"epoch": 1.024,
"grad_norm": 0.8393117657377732,
"learning_rate": 8.364362621864595e-06,
"loss": 0.5501,
"step": 32
},
{
"epoch": 1.056,
"grad_norm": 0.8368148848259993,
"learning_rate": 8.221985152324385e-06,
"loss": 0.5004,
"step": 33
},
{
"epoch": 1.088,
"grad_norm": 1.0449083783998578,
"learning_rate": 8.07499222008977e-06,
"loss": 0.5262,
"step": 34
},
{
"epoch": 1.12,
"grad_norm": 0.9498527409166139,
"learning_rate": 7.923594391120237e-06,
"loss": 0.5087,
"step": 35
},
{
"epoch": 1.152,
"grad_norm": 0.9204978138749593,
"learning_rate": 7.768008541347423e-06,
"loss": 0.5083,
"step": 36
},
{
"epoch": 1.184,
"grad_norm": 0.9742025167645811,
"learning_rate": 7.608457546002423e-06,
"loss": 0.5109,
"step": 37
},
{
"epoch": 1.216,
"grad_norm": 0.9820066122143393,
"learning_rate": 7.445169960349167e-06,
"loss": 0.542,
"step": 38
},
{
"epoch": 1.248,
"grad_norm": 0.8731760697551055,
"learning_rate": 7.278379692281209e-06,
"loss": 0.4646,
"step": 39
},
{
"epoch": 1.28,
"grad_norm": 0.8552117765440119,
"learning_rate": 7.10832566725092e-06,
"loss": 0.4691,
"step": 40
},
{
"epoch": 1.312,
"grad_norm": 1.0485886921584444,
"learning_rate": 6.9352514860110876e-06,
"loss": 0.4867,
"step": 41
},
{
"epoch": 1.3439999999999999,
"grad_norm": 0.8054399255100043,
"learning_rate": 6.759405075659165e-06,
"loss": 0.4988,
"step": 42
},
{
"epoch": 1.376,
"grad_norm": 0.8137660425234108,
"learning_rate": 6.58103833448412e-06,
"loss": 0.4943,
"step": 43
},
{
"epoch": 1.408,
"grad_norm": 0.9191572909893893,
"learning_rate": 6.4004067711245366e-06,
"loss": 0.5099,
"step": 44
},
{
"epoch": 1.44,
"grad_norm": 0.8579377944173575,
"learning_rate": 6.2177691385549595e-06,
"loss": 0.4974,
"step": 45
},
{
"epoch": 1.472,
"grad_norm": 0.8482605744927274,
"learning_rate": 6.033387063424765e-06,
"loss": 0.4619,
"step": 46
},
{
"epoch": 1.504,
"grad_norm": 0.7598606051104131,
"learning_rate": 5.8475246712804845e-06,
"loss": 0.4977,
"step": 47
},
{
"epoch": 1.536,
"grad_norm": 0.7248290008384539,
"learning_rate": 5.660448208208513e-06,
"loss": 0.4893,
"step": 48
},
{
"epoch": 1.568,
"grad_norm": 0.8290652695699064,
"learning_rate": 5.472425659440157e-06,
"loss": 0.4997,
"step": 49
},
{
"epoch": 1.6,
"grad_norm": 0.8445778166933251,
"learning_rate": 5.2837263654653715e-06,
"loss": 0.4802,
"step": 50
},
{
"epoch": 1.6320000000000001,
"grad_norm": 0.8222728417363387,
"learning_rate": 5.094620636205096e-06,
"loss": 0.5016,
"step": 51
},
{
"epoch": 1.6640000000000001,
"grad_norm": 0.7237424288555369,
"learning_rate": 4.905379363794907e-06,
"loss": 0.4943,
"step": 52
},
{
"epoch": 1.696,
"grad_norm": 0.8333802083768971,
"learning_rate": 4.71627363453463e-06,
"loss": 0.491,
"step": 53
},
{
"epoch": 1.728,
"grad_norm": 0.6818057424511519,
"learning_rate": 4.527574340559844e-06,
"loss": 0.4378,
"step": 54
},
{
"epoch": 1.76,
"grad_norm": 0.8299127275023862,
"learning_rate": 4.33955179179149e-06,
"loss": 0.4658,
"step": 55
},
{
"epoch": 1.792,
"grad_norm": 0.8405326607955101,
"learning_rate": 4.152475328719517e-06,
"loss": 0.5099,
"step": 56
},
{
"epoch": 1.8239999999999998,
"grad_norm": 0.7492090483975039,
"learning_rate": 3.966612936575235e-06,
"loss": 0.4825,
"step": 57
},
{
"epoch": 1.8559999999999999,
"grad_norm": 0.7030190039796792,
"learning_rate": 3.782230861445041e-06,
"loss": 0.4644,
"step": 58
},
{
"epoch": 1.888,
"grad_norm": 0.6811613244860312,
"learning_rate": 3.5995932288754655e-06,
"loss": 0.4257,
"step": 59
},
{
"epoch": 1.92,
"grad_norm": 0.7603335654736988,
"learning_rate": 3.4189616655158803e-06,
"loss": 0.4788,
"step": 60
},
{
"epoch": 1.952,
"grad_norm": 0.7077292083579106,
"learning_rate": 3.240594924340835e-06,
"loss": 0.4679,
"step": 61
},
{
"epoch": 1.984,
"grad_norm": 0.8338120865067291,
"learning_rate": 3.0647485139889145e-06,
"loss": 0.5145,
"step": 62
},
{
"epoch": 2.016,
"grad_norm": 0.6135927406398123,
"learning_rate": 2.89167433274908e-06,
"loss": 0.4013,
"step": 63
},
{
"epoch": 2.048,
"grad_norm": 0.7706602310876762,
"learning_rate": 2.721620307718793e-06,
"loss": 0.4279,
"step": 64
},
{
"epoch": 2.08,
"grad_norm": 0.6693415795433767,
"learning_rate": 2.554830039650834e-06,
"loss": 0.4074,
"step": 65
},
{
"epoch": 2.112,
"grad_norm": 0.6646712904057168,
"learning_rate": 2.391542453997578e-06,
"loss": 0.3972,
"step": 66
},
{
"epoch": 2.144,
"grad_norm": 0.6277893548891085,
"learning_rate": 2.2319914586525776e-06,
"loss": 0.4317,
"step": 67
},
{
"epoch": 2.176,
"grad_norm": 0.6861006207092378,
"learning_rate": 2.0764056088797646e-06,
"loss": 0.3826,
"step": 68
},
{
"epoch": 2.208,
"grad_norm": 0.640005258117782,
"learning_rate": 1.9250077799102323e-06,
"loss": 0.4041,
"step": 69
},
{
"epoch": 2.24,
"grad_norm": 0.6248654175061756,
"learning_rate": 1.7780148476756148e-06,
"loss": 0.4387,
"step": 70
},
{
"epoch": 2.2720000000000002,
"grad_norm": 0.6670800065284683,
"learning_rate": 1.6356373781354058e-06,
"loss": 0.4334,
"step": 71
},
{
"epoch": 2.304,
"grad_norm": 0.6112441982790366,
"learning_rate": 1.4980793256432474e-06,
"loss": 0.4368,
"step": 72
},
{
"epoch": 2.336,
"grad_norm": 0.6820462674363659,
"learning_rate": 1.3655377407842813e-06,
"loss": 0.4073,
"step": 73
},
{
"epoch": 2.368,
"grad_norm": 0.5637152749781639,
"learning_rate": 1.2382024881020937e-06,
"loss": 0.4026,
"step": 74
},
{
"epoch": 2.4,
"grad_norm": 0.62501378989424,
"learning_rate": 1.1162559741195733e-06,
"loss": 0.3788,
"step": 75
},
{
"epoch": 2.432,
"grad_norm": 0.5909257721151473,
"learning_rate": 9.998728860433277e-07,
"loss": 0.414,
"step": 76
},
{
"epoch": 2.464,
"grad_norm": 0.59961756687512,
"learning_rate": 8.892199415259501e-07,
"loss": 0.3815,
"step": 77
},
{
"epoch": 2.496,
"grad_norm": 0.6327449480704133,
"learning_rate": 7.844556498445788e-07,
"loss": 0.3731,
"step": 78
},
{
"epoch": 2.528,
"grad_norm": 0.6163817718459151,
"learning_rate": 6.857300848378857e-07,
"loss": 0.4096,
"step": 79
},
{
"epoch": 2.56,
"grad_norm": 0.614006560513663,
"learning_rate": 5.931846699267558e-07,
"loss": 0.4437,
"step": 80
},
{
"epoch": 2.592,
"grad_norm": 0.6432304424213249,
"learning_rate": 5.0695197552659e-07,
"loss": 0.4076,
"step": 81
},
{
"epoch": 2.624,
"grad_norm": 0.6064212963275805,
"learning_rate": 4.271555291414636e-07,
"loss": 0.3967,
"step": 82
},
{
"epoch": 2.656,
"grad_norm": 0.6049906222701497,
"learning_rate": 3.539096384121743e-07,
"loss": 0.3843,
"step": 83
},
{
"epoch": 2.6879999999999997,
"grad_norm": 0.5757860975137236,
"learning_rate": 2.873192273716369e-07,
"loss": 0.3836,
"step": 84
},
{
"epoch": 2.7199999999999998,
"grad_norm": 0.6129350159111668,
"learning_rate": 2.274796861422246e-07,
"loss": 0.4543,
"step": 85
},
{
"epoch": 2.752,
"grad_norm": 0.6388122099182162,
"learning_rate": 1.7447673429033361e-07,
"loss": 0.3527,
"step": 86
},
{
"epoch": 2.784,
"grad_norm": 0.6016188801263589,
"learning_rate": 1.2838629803393343e-07,
"loss": 0.3922,
"step": 87
},
{
"epoch": 2.816,
"grad_norm": 0.5819716193194474,
"learning_rate": 8.927440147898703e-08,
"loss": 0.3972,
"step": 88
},
{
"epoch": 2.848,
"grad_norm": 0.5860665357952272,
"learning_rate": 5.7197072040557356e-08,
"loss": 0.4262,
"step": 89
},
{
"epoch": 2.88,
"grad_norm": 0.6249229708634069,
"learning_rate": 3.220026018407541e-08,
"loss": 0.4841,
"step": 90
},
{
"epoch": 2.912,
"grad_norm": 0.5591758817715189,
"learning_rate": 1.431977360173975e-08,
"loss": 0.3779,
"step": 91
},
{
"epoch": 2.944,
"grad_norm": 0.6402184136731239,
"learning_rate": 3.5812259183426457e-09,
"loss": 0.4023,
"step": 92
},
{
"epoch": 2.976,
"grad_norm": 0.6090640200163872,
"learning_rate": 0.0,
"loss": 0.3697,
"step": 93
},
{
"epoch": 2.976,
"step": 93,
"total_flos": 5.107931021941146e+16,
"train_loss": 0.5300623857846825,
"train_runtime": 4609.5522,
"train_samples_per_second": 0.648,
"train_steps_per_second": 0.02
}
],
"logging_steps": 1,
"max_steps": 93,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.107931021941146e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}