{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 678,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04424778761061947,
"grad_norm": 6.127150945779567,
"learning_rate": 5e-06,
"loss": 0.5902,
"step": 10
},
{
"epoch": 0.08849557522123894,
"grad_norm": 5.74329347119261,
"learning_rate": 5e-06,
"loss": 0.5179,
"step": 20
},
{
"epoch": 0.13274336283185842,
"grad_norm": 0.7209844282034923,
"learning_rate": 5e-06,
"loss": 0.4956,
"step": 30
},
{
"epoch": 0.17699115044247787,
"grad_norm": 0.8838428515444021,
"learning_rate": 5e-06,
"loss": 0.4693,
"step": 40
},
{
"epoch": 0.22123893805309736,
"grad_norm": 1.047241933895238,
"learning_rate": 5e-06,
"loss": 0.4542,
"step": 50
},
{
"epoch": 0.26548672566371684,
"grad_norm": 0.6249873423196232,
"learning_rate": 5e-06,
"loss": 0.4507,
"step": 60
},
{
"epoch": 0.30973451327433627,
"grad_norm": 0.6688340399513245,
"learning_rate": 5e-06,
"loss": 0.4422,
"step": 70
},
{
"epoch": 0.35398230088495575,
"grad_norm": 0.5938577799260061,
"learning_rate": 5e-06,
"loss": 0.4371,
"step": 80
},
{
"epoch": 0.39823008849557523,
"grad_norm": 0.39377946979563905,
"learning_rate": 5e-06,
"loss": 0.4288,
"step": 90
},
{
"epoch": 0.4424778761061947,
"grad_norm": 0.3494222859720882,
"learning_rate": 5e-06,
"loss": 0.4274,
"step": 100
},
{
"epoch": 0.48672566371681414,
"grad_norm": 0.37585161708391424,
"learning_rate": 5e-06,
"loss": 0.4204,
"step": 110
},
{
"epoch": 0.5309734513274337,
"grad_norm": 0.41629482517793825,
"learning_rate": 5e-06,
"loss": 0.4204,
"step": 120
},
{
"epoch": 0.5752212389380531,
"grad_norm": 0.476310089674328,
"learning_rate": 5e-06,
"loss": 0.4205,
"step": 130
},
{
"epoch": 0.6194690265486725,
"grad_norm": 0.33466258836176344,
"learning_rate": 5e-06,
"loss": 0.4189,
"step": 140
},
{
"epoch": 0.6637168141592921,
"grad_norm": 0.4017367648814896,
"learning_rate": 5e-06,
"loss": 0.4153,
"step": 150
},
{
"epoch": 0.7079646017699115,
"grad_norm": 0.4216322560941302,
"learning_rate": 5e-06,
"loss": 0.4127,
"step": 160
},
{
"epoch": 0.7522123893805309,
"grad_norm": 0.4974612613694754,
"learning_rate": 5e-06,
"loss": 0.4171,
"step": 170
},
{
"epoch": 0.7964601769911505,
"grad_norm": 0.4002778638113851,
"learning_rate": 5e-06,
"loss": 0.4136,
"step": 180
},
{
"epoch": 0.8407079646017699,
"grad_norm": 0.38244063424271957,
"learning_rate": 5e-06,
"loss": 0.4141,
"step": 190
},
{
"epoch": 0.8849557522123894,
"grad_norm": 0.6422183389700243,
"learning_rate": 5e-06,
"loss": 0.4047,
"step": 200
},
{
"epoch": 0.9292035398230089,
"grad_norm": 0.47781718318215916,
"learning_rate": 5e-06,
"loss": 0.4114,
"step": 210
},
{
"epoch": 0.9734513274336283,
"grad_norm": 0.458646597184022,
"learning_rate": 5e-06,
"loss": 0.4066,
"step": 220
},
{
"epoch": 1.0,
"eval_loss": 0.40742945671081543,
"eval_runtime": 21.8393,
"eval_samples_per_second": 278.306,
"eval_steps_per_second": 1.099,
"step": 226
},
{
"epoch": 1.0176991150442478,
"grad_norm": 0.40450956449919656,
"learning_rate": 5e-06,
"loss": 0.3954,
"step": 230
},
{
"epoch": 1.0619469026548674,
"grad_norm": 0.33840868352830317,
"learning_rate": 5e-06,
"loss": 0.3815,
"step": 240
},
{
"epoch": 1.1061946902654867,
"grad_norm": 0.42957928697241526,
"learning_rate": 5e-06,
"loss": 0.376,
"step": 250
},
{
"epoch": 1.1504424778761062,
"grad_norm": 0.44727075308000447,
"learning_rate": 5e-06,
"loss": 0.3824,
"step": 260
},
{
"epoch": 1.1946902654867257,
"grad_norm": 0.3832458382206874,
"learning_rate": 5e-06,
"loss": 0.3799,
"step": 270
},
{
"epoch": 1.238938053097345,
"grad_norm": 0.46565842447548633,
"learning_rate": 5e-06,
"loss": 0.3763,
"step": 280
},
{
"epoch": 1.2831858407079646,
"grad_norm": 0.3663739624439434,
"learning_rate": 5e-06,
"loss": 0.3813,
"step": 290
},
{
"epoch": 1.3274336283185841,
"grad_norm": 0.38686016936047624,
"learning_rate": 5e-06,
"loss": 0.3825,
"step": 300
},
{
"epoch": 1.3716814159292037,
"grad_norm": 0.4469971428096629,
"learning_rate": 5e-06,
"loss": 0.3797,
"step": 310
},
{
"epoch": 1.415929203539823,
"grad_norm": 0.3943929709248344,
"learning_rate": 5e-06,
"loss": 0.3771,
"step": 320
},
{
"epoch": 1.4601769911504425,
"grad_norm": 0.5005987203493157,
"learning_rate": 5e-06,
"loss": 0.3797,
"step": 330
},
{
"epoch": 1.504424778761062,
"grad_norm": 0.4398991459645874,
"learning_rate": 5e-06,
"loss": 0.3782,
"step": 340
},
{
"epoch": 1.5486725663716814,
"grad_norm": 0.34410767390147695,
"learning_rate": 5e-06,
"loss": 0.3781,
"step": 350
},
{
"epoch": 1.592920353982301,
"grad_norm": 0.3960889669303223,
"learning_rate": 5e-06,
"loss": 0.3749,
"step": 360
},
{
"epoch": 1.6371681415929205,
"grad_norm": 0.4269177410578348,
"learning_rate": 5e-06,
"loss": 0.3774,
"step": 370
},
{
"epoch": 1.6814159292035398,
"grad_norm": 0.5155261299248002,
"learning_rate": 5e-06,
"loss": 0.3804,
"step": 380
},
{
"epoch": 1.7256637168141593,
"grad_norm": 0.43969372154192843,
"learning_rate": 5e-06,
"loss": 0.378,
"step": 390
},
{
"epoch": 1.7699115044247788,
"grad_norm": 0.4476097491183775,
"learning_rate": 5e-06,
"loss": 0.3755,
"step": 400
},
{
"epoch": 1.8141592920353982,
"grad_norm": 0.38686563229116155,
"learning_rate": 5e-06,
"loss": 0.3801,
"step": 410
},
{
"epoch": 1.8584070796460177,
"grad_norm": 0.35475500773398083,
"learning_rate": 5e-06,
"loss": 0.3737,
"step": 420
},
{
"epoch": 1.9026548672566372,
"grad_norm": 0.3300920550320411,
"learning_rate": 5e-06,
"loss": 0.377,
"step": 430
},
{
"epoch": 1.9469026548672566,
"grad_norm": 0.37302140942747203,
"learning_rate": 5e-06,
"loss": 0.372,
"step": 440
},
{
"epoch": 1.991150442477876,
"grad_norm": 0.3842303488060595,
"learning_rate": 5e-06,
"loss": 0.3734,
"step": 450
},
{
"epoch": 2.0,
"eval_loss": 0.39825692772865295,
"eval_runtime": 21.4661,
"eval_samples_per_second": 283.144,
"eval_steps_per_second": 1.118,
"step": 452
},
{
"epoch": 2.0353982300884956,
"grad_norm": 0.48990695812840507,
"learning_rate": 5e-06,
"loss": 0.3513,
"step": 460
},
{
"epoch": 2.079646017699115,
"grad_norm": 0.45716992449403504,
"learning_rate": 5e-06,
"loss": 0.3424,
"step": 470
},
{
"epoch": 2.1238938053097347,
"grad_norm": 0.4089130956180029,
"learning_rate": 5e-06,
"loss": 0.3436,
"step": 480
},
{
"epoch": 2.168141592920354,
"grad_norm": 0.426162580374922,
"learning_rate": 5e-06,
"loss": 0.3476,
"step": 490
},
{
"epoch": 2.2123893805309733,
"grad_norm": 0.3887768051888474,
"learning_rate": 5e-06,
"loss": 0.3456,
"step": 500
},
{
"epoch": 2.256637168141593,
"grad_norm": 0.49818725927434065,
"learning_rate": 5e-06,
"loss": 0.346,
"step": 510
},
{
"epoch": 2.3008849557522124,
"grad_norm": 0.3869718840156669,
"learning_rate": 5e-06,
"loss": 0.3458,
"step": 520
},
{
"epoch": 2.3451327433628317,
"grad_norm": 0.4420786488564716,
"learning_rate": 5e-06,
"loss": 0.3409,
"step": 530
},
{
"epoch": 2.3893805309734515,
"grad_norm": 0.4328163120927778,
"learning_rate": 5e-06,
"loss": 0.3487,
"step": 540
},
{
"epoch": 2.433628318584071,
"grad_norm": 0.39197907411119853,
"learning_rate": 5e-06,
"loss": 0.3451,
"step": 550
},
{
"epoch": 2.47787610619469,
"grad_norm": 0.36416856007458076,
"learning_rate": 5e-06,
"loss": 0.3503,
"step": 560
},
{
"epoch": 2.52212389380531,
"grad_norm": 0.3746017341827326,
"learning_rate": 5e-06,
"loss": 0.3469,
"step": 570
},
{
"epoch": 2.566371681415929,
"grad_norm": 0.3618786916476107,
"learning_rate": 5e-06,
"loss": 0.3464,
"step": 580
},
{
"epoch": 2.6106194690265485,
"grad_norm": 0.41598805205539596,
"learning_rate": 5e-06,
"loss": 0.3435,
"step": 590
},
{
"epoch": 2.6548672566371683,
"grad_norm": 0.37961766765714905,
"learning_rate": 5e-06,
"loss": 0.3486,
"step": 600
},
{
"epoch": 2.6991150442477876,
"grad_norm": 0.39738095608916224,
"learning_rate": 5e-06,
"loss": 0.3488,
"step": 610
},
{
"epoch": 2.7433628318584073,
"grad_norm": 0.40094307810609703,
"learning_rate": 5e-06,
"loss": 0.3459,
"step": 620
},
{
"epoch": 2.7876106194690267,
"grad_norm": 0.355834042921306,
"learning_rate": 5e-06,
"loss": 0.3415,
"step": 630
},
{
"epoch": 2.831858407079646,
"grad_norm": 0.3745969159488807,
"learning_rate": 5e-06,
"loss": 0.35,
"step": 640
},
{
"epoch": 2.8761061946902657,
"grad_norm": 0.3789920652707106,
"learning_rate": 5e-06,
"loss": 0.3477,
"step": 650
},
{
"epoch": 2.920353982300885,
"grad_norm": 0.3918177591119166,
"learning_rate": 5e-06,
"loss": 0.3477,
"step": 660
},
{
"epoch": 2.9646017699115044,
"grad_norm": 0.33578037413287726,
"learning_rate": 5e-06,
"loss": 0.3466,
"step": 670
},
{
"epoch": 3.0,
"eval_loss": 0.3991487920284271,
"eval_runtime": 21.5542,
"eval_samples_per_second": 281.987,
"eval_steps_per_second": 1.113,
"step": 678
},
{
"epoch": 3.0,
"step": 678,
"total_flos": 1135675252408320.0,
"train_loss": 0.38758731908151184,
"train_runtime": 4370.1856,
"train_samples_per_second": 79.274,
"train_steps_per_second": 0.155
}
],
"logging_steps": 10,
"max_steps": 678,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1135675252408320.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}