{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.9728,
"eval_steps": 500,
"global_step": 780,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.064,
"grad_norm": 115.26007653843197,
"learning_rate": 1.153846153846154e-06,
"loss": 1.8013,
"step": 10
},
{
"epoch": 0.128,
"grad_norm": 10.569494321215535,
"learning_rate": 2.435897435897436e-06,
"loss": 0.9178,
"step": 20
},
{
"epoch": 0.192,
"grad_norm": 4.124932595911465,
"learning_rate": 3.7179487179487184e-06,
"loss": 0.0701,
"step": 30
},
{
"epoch": 0.256,
"grad_norm": 2.159300862803656,
"learning_rate": 5e-06,
"loss": 0.063,
"step": 40
},
{
"epoch": 0.32,
"grad_norm": 1.9071472971676064,
"learning_rate": 6.282051282051282e-06,
"loss": 0.0551,
"step": 50
},
{
"epoch": 0.384,
"grad_norm": 1.77751831177194,
"learning_rate": 7.564102564102564e-06,
"loss": 0.1684,
"step": 60
},
{
"epoch": 0.448,
"grad_norm": 2.8846747837228044,
"learning_rate": 8.846153846153847e-06,
"loss": 0.0594,
"step": 70
},
{
"epoch": 0.512,
"grad_norm": 2.2232099871457214,
"learning_rate": 9.985754985754987e-06,
"loss": 0.0701,
"step": 80
},
{
"epoch": 0.576,
"grad_norm": 0.17533270856659205,
"learning_rate": 9.843304843304844e-06,
"loss": 0.0472,
"step": 90
},
{
"epoch": 0.64,
"grad_norm": 4.6946861778266005,
"learning_rate": 9.700854700854701e-06,
"loss": 0.0884,
"step": 100
},
{
"epoch": 0.704,
"grad_norm": 1.5944295283894352,
"learning_rate": 9.558404558404559e-06,
"loss": 0.0524,
"step": 110
},
{
"epoch": 0.768,
"grad_norm": 12.143478923229893,
"learning_rate": 9.415954415954418e-06,
"loss": 0.0751,
"step": 120
},
{
"epoch": 0.832,
"grad_norm": 1.928902876563226,
"learning_rate": 9.273504273504275e-06,
"loss": 0.0468,
"step": 130
},
{
"epoch": 0.896,
"grad_norm": 6.170092925228969,
"learning_rate": 9.131054131054132e-06,
"loss": 0.0599,
"step": 140
},
{
"epoch": 0.96,
"grad_norm": 2.597167689806361,
"learning_rate": 8.988603988603988e-06,
"loss": 0.0391,
"step": 150
},
{
"epoch": 1.0192,
"grad_norm": 1.6009822144938042,
"learning_rate": 8.846153846153847e-06,
"loss": 0.0651,
"step": 160
},
{
"epoch": 1.0832,
"grad_norm": 1.95329743835081,
"learning_rate": 8.703703703703705e-06,
"loss": 0.0458,
"step": 170
},
{
"epoch": 1.1472,
"grad_norm": 3.5182962689108472,
"learning_rate": 8.561253561253562e-06,
"loss": 0.042,
"step": 180
},
{
"epoch": 1.2112,
"grad_norm": 0.6280194803283852,
"learning_rate": 8.41880341880342e-06,
"loss": 0.0486,
"step": 190
},
{
"epoch": 1.2752,
"grad_norm": 2.005435993786338,
"learning_rate": 8.276353276353277e-06,
"loss": 0.044,
"step": 200
},
{
"epoch": 1.3392,
"grad_norm": 3.625729078804922,
"learning_rate": 8.133903133903136e-06,
"loss": 0.0496,
"step": 210
},
{
"epoch": 1.4032,
"grad_norm": 5.776880834282877,
"learning_rate": 7.991452991452993e-06,
"loss": 0.0598,
"step": 220
},
{
"epoch": 1.4672,
"grad_norm": 1.6783013007732481,
"learning_rate": 7.849002849002849e-06,
"loss": 0.048,
"step": 230
},
{
"epoch": 1.5312000000000001,
"grad_norm": 1.663831218977686,
"learning_rate": 7.706552706552706e-06,
"loss": 0.0423,
"step": 240
},
{
"epoch": 1.5952,
"grad_norm": 1.9513045254560206,
"learning_rate": 7.564102564102564e-06,
"loss": 0.0481,
"step": 250
},
{
"epoch": 1.6592,
"grad_norm": 2.2562075660922165,
"learning_rate": 7.421652421652423e-06,
"loss": 0.0498,
"step": 260
},
{
"epoch": 1.7231999999999998,
"grad_norm": 3.2420166505807773,
"learning_rate": 7.27920227920228e-06,
"loss": 0.029,
"step": 270
},
{
"epoch": 1.7872,
"grad_norm": 2.0275028822982852,
"learning_rate": 7.136752136752137e-06,
"loss": 0.0393,
"step": 280
},
{
"epoch": 1.8512,
"grad_norm": 1.8500231759762549,
"learning_rate": 6.9943019943019955e-06,
"loss": 0.037,
"step": 290
},
{
"epoch": 1.9152,
"grad_norm": 3.7282930807176604,
"learning_rate": 6.851851851851853e-06,
"loss": 0.0373,
"step": 300
},
{
"epoch": 1.9792,
"grad_norm": 3.42282088810128,
"learning_rate": 6.7094017094017094e-06,
"loss": 0.0352,
"step": 310
},
{
"epoch": 2.0384,
"grad_norm": 0.9105851467194691,
"learning_rate": 6.566951566951567e-06,
"loss": 0.0292,
"step": 320
},
{
"epoch": 2.1024,
"grad_norm": 1.244830667076524,
"learning_rate": 6.424501424501425e-06,
"loss": 0.0405,
"step": 330
},
{
"epoch": 2.1664,
"grad_norm": 1.0510058128223077,
"learning_rate": 6.282051282051282e-06,
"loss": 0.0282,
"step": 340
},
{
"epoch": 2.2304,
"grad_norm": 1.5807447986200327,
"learning_rate": 6.13960113960114e-06,
"loss": 0.0379,
"step": 350
},
{
"epoch": 2.2944,
"grad_norm": 2.9923562143253855,
"learning_rate": 5.997150997150998e-06,
"loss": 0.0362,
"step": 360
},
{
"epoch": 2.3584,
"grad_norm": 1.1768976103669906,
"learning_rate": 5.854700854700855e-06,
"loss": 0.0304,
"step": 370
},
{
"epoch": 2.4224,
"grad_norm": 1.0469384723844881,
"learning_rate": 5.7122507122507136e-06,
"loss": 0.0341,
"step": 380
},
{
"epoch": 2.4864,
"grad_norm": 3.0634909414310805,
"learning_rate": 5.56980056980057e-06,
"loss": 0.0455,
"step": 390
},
{
"epoch": 2.5504,
"grad_norm": 5.409022806393993,
"learning_rate": 5.4273504273504275e-06,
"loss": 0.0391,
"step": 400
},
{
"epoch": 2.6144,
"grad_norm": 2.341956839425436,
"learning_rate": 5.284900284900285e-06,
"loss": 0.0354,
"step": 410
},
{
"epoch": 2.6784,
"grad_norm": 2.511241308504355,
"learning_rate": 5.142450142450143e-06,
"loss": 0.0253,
"step": 420
},
{
"epoch": 2.7424,
"grad_norm": 0.690018698433535,
"learning_rate": 5e-06,
"loss": 0.0342,
"step": 430
},
{
"epoch": 2.8064,
"grad_norm": 1.4836436664927006,
"learning_rate": 4.857549857549858e-06,
"loss": 0.0289,
"step": 440
},
{
"epoch": 2.8704,
"grad_norm": 3.3287631024455555,
"learning_rate": 4.715099715099716e-06,
"loss": 0.034,
"step": 450
},
{
"epoch": 2.9344,
"grad_norm": 1.9028860466386455,
"learning_rate": 4.5726495726495725e-06,
"loss": 0.0394,
"step": 460
},
{
"epoch": 2.9984,
"grad_norm": 1.5644909433575458,
"learning_rate": 4.430199430199431e-06,
"loss": 0.0316,
"step": 470
},
{
"epoch": 3.0576,
"grad_norm": 0.5702118410044564,
"learning_rate": 4.287749287749288e-06,
"loss": 0.0271,
"step": 480
},
{
"epoch": 3.1216,
"grad_norm": 0.7240226489581562,
"learning_rate": 4.145299145299146e-06,
"loss": 0.0257,
"step": 490
},
{
"epoch": 3.1856,
"grad_norm": 1.3730314778183395,
"learning_rate": 4.002849002849003e-06,
"loss": 0.0242,
"step": 500
},
{
"epoch": 3.2496,
"grad_norm": 1.409286440913811,
"learning_rate": 3.860398860398861e-06,
"loss": 0.0176,
"step": 510
},
{
"epoch": 3.3136,
"grad_norm": 0.353728945819478,
"learning_rate": 3.7179487179487184e-06,
"loss": 0.0214,
"step": 520
},
{
"epoch": 3.3776,
"grad_norm": 0.6615032954587686,
"learning_rate": 3.5754985754985762e-06,
"loss": 0.0283,
"step": 530
},
{
"epoch": 3.4416,
"grad_norm": 1.047780597662055,
"learning_rate": 3.433048433048433e-06,
"loss": 0.0272,
"step": 540
},
{
"epoch": 3.5056000000000003,
"grad_norm": 1.0880693279764668,
"learning_rate": 3.290598290598291e-06,
"loss": 0.0191,
"step": 550
},
{
"epoch": 3.5696,
"grad_norm": 0.593950500649586,
"learning_rate": 3.1481481481481483e-06,
"loss": 0.0267,
"step": 560
},
{
"epoch": 3.6336,
"grad_norm": 1.4894282812370825,
"learning_rate": 3.005698005698006e-06,
"loss": 0.0397,
"step": 570
},
{
"epoch": 3.6976,
"grad_norm": 2.6737604623174818,
"learning_rate": 2.8632478632478635e-06,
"loss": 0.0271,
"step": 580
},
{
"epoch": 3.7616,
"grad_norm": 0.7247629374871387,
"learning_rate": 2.720797720797721e-06,
"loss": 0.0243,
"step": 590
},
{
"epoch": 3.8256,
"grad_norm": 2.025099186148271,
"learning_rate": 2.5783475783475787e-06,
"loss": 0.0235,
"step": 600
},
{
"epoch": 3.8895999999999997,
"grad_norm": 0.2161434673903271,
"learning_rate": 2.435897435897436e-06,
"loss": 0.0249,
"step": 610
},
{
"epoch": 3.9536,
"grad_norm": 0.9482360067668893,
"learning_rate": 2.293447293447294e-06,
"loss": 0.0186,
"step": 620
},
{
"epoch": 4.0128,
"grad_norm": 0.3787496021708608,
"learning_rate": 2.150997150997151e-06,
"loss": 0.0167,
"step": 630
},
{
"epoch": 4.0768,
"grad_norm": 1.306024706029847,
"learning_rate": 2.008547008547009e-06,
"loss": 0.0159,
"step": 640
},
{
"epoch": 4.1408,
"grad_norm": 3.6816675792077183,
"learning_rate": 1.8660968660968661e-06,
"loss": 0.0183,
"step": 650
},
{
"epoch": 4.2048,
"grad_norm": 0.9437993473728703,
"learning_rate": 1.723646723646724e-06,
"loss": 0.0137,
"step": 660
},
{
"epoch": 4.2688,
"grad_norm": 1.1491393759333486,
"learning_rate": 1.5811965811965813e-06,
"loss": 0.0176,
"step": 670
},
{
"epoch": 4.3328,
"grad_norm": 0.7015793398100599,
"learning_rate": 1.4387464387464389e-06,
"loss": 0.0142,
"step": 680
},
{
"epoch": 4.3968,
"grad_norm": 1.154214916933845,
"learning_rate": 1.2962962962962962e-06,
"loss": 0.0185,
"step": 690
},
{
"epoch": 4.4608,
"grad_norm": 0.1581403197209941,
"learning_rate": 1.153846153846154e-06,
"loss": 0.0097,
"step": 700
},
{
"epoch": 4.5248,
"grad_norm": 0.944788394994583,
"learning_rate": 1.0113960113960116e-06,
"loss": 0.0104,
"step": 710
},
{
"epoch": 4.5888,
"grad_norm": 1.399396791060691,
"learning_rate": 8.689458689458691e-07,
"loss": 0.0163,
"step": 720
},
{
"epoch": 4.6528,
"grad_norm": 1.1267866164560314,
"learning_rate": 7.264957264957266e-07,
"loss": 0.0151,
"step": 730
},
{
"epoch": 4.7168,
"grad_norm": 0.5615964881104024,
"learning_rate": 5.84045584045584e-07,
"loss": 0.0132,
"step": 740
},
{
"epoch": 4.7808,
"grad_norm": 1.0241538807872168,
"learning_rate": 4.415954415954416e-07,
"loss": 0.012,
"step": 750
},
{
"epoch": 4.8448,
"grad_norm": 1.9733087302730272,
"learning_rate": 2.991452991452992e-07,
"loss": 0.0152,
"step": 760
},
{
"epoch": 4.9088,
"grad_norm": 0.8383865793115187,
"learning_rate": 1.566951566951567e-07,
"loss": 0.0109,
"step": 770
},
{
"epoch": 4.9728,
"grad_norm": 0.5899609475557703,
"learning_rate": 1.4245014245014247e-08,
"loss": 0.0113,
"step": 780
},
{
"epoch": 4.9728,
"step": 780,
"total_flos": 544113163239424.0,
"train_loss": 0.0703721432827222,
"train_runtime": 36514.9195,
"train_samples_per_second": 2.739,
"train_steps_per_second": 0.021
}
],
"logging_steps": 10,
"max_steps": 780,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 544113163239424.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}