{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9952,
"eval_steps": 500,
"global_step": 936,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.032,
"grad_norm": 11.096345041133668,
"learning_rate": 5e-06,
"loss": 0.9124,
"step": 10
},
{
"epoch": 0.064,
"grad_norm": 2.0005484215366707,
"learning_rate": 5e-06,
"loss": 0.8231,
"step": 20
},
{
"epoch": 0.096,
"grad_norm": 1.7931305619074691,
"learning_rate": 5e-06,
"loss": 0.7884,
"step": 30
},
{
"epoch": 0.128,
"grad_norm": 0.851189870496473,
"learning_rate": 5e-06,
"loss": 0.7635,
"step": 40
},
{
"epoch": 0.16,
"grad_norm": 0.9889043585826149,
"learning_rate": 5e-06,
"loss": 0.7591,
"step": 50
},
{
"epoch": 0.192,
"grad_norm": 0.8749373478404373,
"learning_rate": 5e-06,
"loss": 0.7381,
"step": 60
},
{
"epoch": 0.224,
"grad_norm": 1.0946174467023697,
"learning_rate": 5e-06,
"loss": 0.7318,
"step": 70
},
{
"epoch": 0.256,
"grad_norm": 0.9291846880748199,
"learning_rate": 5e-06,
"loss": 0.7179,
"step": 80
},
{
"epoch": 0.288,
"grad_norm": 0.5509636442658649,
"learning_rate": 5e-06,
"loss": 0.7139,
"step": 90
},
{
"epoch": 0.32,
"grad_norm": 0.593197960775897,
"learning_rate": 5e-06,
"loss": 0.7063,
"step": 100
},
{
"epoch": 0.352,
"grad_norm": 0.9507443053374502,
"learning_rate": 5e-06,
"loss": 0.7116,
"step": 110
},
{
"epoch": 0.384,
"grad_norm": 0.8188616654629195,
"learning_rate": 5e-06,
"loss": 0.7087,
"step": 120
},
{
"epoch": 0.416,
"grad_norm": 0.8138893569857725,
"learning_rate": 5e-06,
"loss": 0.7071,
"step": 130
},
{
"epoch": 0.448,
"grad_norm": 0.6168823207810579,
"learning_rate": 5e-06,
"loss": 0.707,
"step": 140
},
{
"epoch": 0.48,
"grad_norm": 0.5851796811481069,
"learning_rate": 5e-06,
"loss": 0.7042,
"step": 150
},
{
"epoch": 0.512,
"grad_norm": 0.6381484672996786,
"learning_rate": 5e-06,
"loss": 0.6966,
"step": 160
},
{
"epoch": 0.544,
"grad_norm": 1.1453372921610079,
"learning_rate": 5e-06,
"loss": 0.6971,
"step": 170
},
{
"epoch": 0.576,
"grad_norm": 0.45645996706514147,
"learning_rate": 5e-06,
"loss": 0.7013,
"step": 180
},
{
"epoch": 0.608,
"grad_norm": 0.6963644924074289,
"learning_rate": 5e-06,
"loss": 0.7016,
"step": 190
},
{
"epoch": 0.64,
"grad_norm": 0.6629526941218145,
"learning_rate": 5e-06,
"loss": 0.6907,
"step": 200
},
{
"epoch": 0.672,
"grad_norm": 0.46342013498108614,
"learning_rate": 5e-06,
"loss": 0.6947,
"step": 210
},
{
"epoch": 0.704,
"grad_norm": 0.5188693861900772,
"learning_rate": 5e-06,
"loss": 0.6947,
"step": 220
},
{
"epoch": 0.736,
"grad_norm": 0.46151796592999117,
"learning_rate": 5e-06,
"loss": 0.6888,
"step": 230
},
{
"epoch": 0.768,
"grad_norm": 0.5322678506039507,
"learning_rate": 5e-06,
"loss": 0.6933,
"step": 240
},
{
"epoch": 0.8,
"grad_norm": 0.4694684079232731,
"learning_rate": 5e-06,
"loss": 0.6907,
"step": 250
},
{
"epoch": 0.832,
"grad_norm": 0.5841370812658152,
"learning_rate": 5e-06,
"loss": 0.6932,
"step": 260
},
{
"epoch": 0.864,
"grad_norm": 0.49903482096358526,
"learning_rate": 5e-06,
"loss": 0.6937,
"step": 270
},
{
"epoch": 0.896,
"grad_norm": 0.5304368954337139,
"learning_rate": 5e-06,
"loss": 0.6845,
"step": 280
},
{
"epoch": 0.928,
"grad_norm": 0.6838692426374734,
"learning_rate": 5e-06,
"loss": 0.6842,
"step": 290
},
{
"epoch": 0.96,
"grad_norm": 0.6180107533054702,
"learning_rate": 5e-06,
"loss": 0.6772,
"step": 300
},
{
"epoch": 0.992,
"grad_norm": 0.550556182335329,
"learning_rate": 5e-06,
"loss": 0.6705,
"step": 310
},
{
"epoch": 0.9984,
"eval_loss": 0.6830303072929382,
"eval_runtime": 166.7227,
"eval_samples_per_second": 50.491,
"eval_steps_per_second": 0.396,
"step": 312
},
{
"epoch": 1.024,
"grad_norm": 0.7196672666972574,
"learning_rate": 5e-06,
"loss": 0.6414,
"step": 320
},
{
"epoch": 1.056,
"grad_norm": 0.6864929334563745,
"learning_rate": 5e-06,
"loss": 0.6315,
"step": 330
},
{
"epoch": 1.088,
"grad_norm": 0.7530113138351922,
"learning_rate": 5e-06,
"loss": 0.6361,
"step": 340
},
{
"epoch": 1.12,
"grad_norm": 0.5287710086719117,
"learning_rate": 5e-06,
"loss": 0.6356,
"step": 350
},
{
"epoch": 1.152,
"grad_norm": 0.5840075309781476,
"learning_rate": 5e-06,
"loss": 0.6322,
"step": 360
},
{
"epoch": 1.184,
"grad_norm": 0.5008293781573286,
"learning_rate": 5e-06,
"loss": 0.648,
"step": 370
},
{
"epoch": 1.216,
"grad_norm": 0.5734259910470129,
"learning_rate": 5e-06,
"loss": 0.6344,
"step": 380
},
{
"epoch": 1.248,
"grad_norm": 0.5742943072984884,
"learning_rate": 5e-06,
"loss": 0.634,
"step": 390
},
{
"epoch": 1.28,
"grad_norm": 0.5646558201841438,
"learning_rate": 5e-06,
"loss": 0.6367,
"step": 400
},
{
"epoch": 1.312,
"grad_norm": 0.582405938579332,
"learning_rate": 5e-06,
"loss": 0.6333,
"step": 410
},
{
"epoch": 1.3439999999999999,
"grad_norm": 0.5464516067637085,
"learning_rate": 5e-06,
"loss": 0.627,
"step": 420
},
{
"epoch": 1.376,
"grad_norm": 0.526922439327205,
"learning_rate": 5e-06,
"loss": 0.6413,
"step": 430
},
{
"epoch": 1.408,
"grad_norm": 0.5460672292153811,
"learning_rate": 5e-06,
"loss": 0.6293,
"step": 440
},
{
"epoch": 1.44,
"grad_norm": 0.5139358671369992,
"learning_rate": 5e-06,
"loss": 0.63,
"step": 450
},
{
"epoch": 1.472,
"grad_norm": 0.5915093665608144,
"learning_rate": 5e-06,
"loss": 0.6459,
"step": 460
},
{
"epoch": 1.504,
"grad_norm": 0.5570676911617528,
"learning_rate": 5e-06,
"loss": 0.6376,
"step": 470
},
{
"epoch": 1.536,
"grad_norm": 0.5728608013877954,
"learning_rate": 5e-06,
"loss": 0.6401,
"step": 480
},
{
"epoch": 1.568,
"grad_norm": 0.4896266564265616,
"learning_rate": 5e-06,
"loss": 0.6369,
"step": 490
},
{
"epoch": 1.6,
"grad_norm": 0.4962652191813324,
"learning_rate": 5e-06,
"loss": 0.6415,
"step": 500
},
{
"epoch": 1.6320000000000001,
"grad_norm": 0.5518896628136374,
"learning_rate": 5e-06,
"loss": 0.6287,
"step": 510
},
{
"epoch": 1.6640000000000001,
"grad_norm": 0.43944356606683,
"learning_rate": 5e-06,
"loss": 0.6387,
"step": 520
},
{
"epoch": 1.696,
"grad_norm": 0.5183777489924765,
"learning_rate": 5e-06,
"loss": 0.6376,
"step": 530
},
{
"epoch": 1.728,
"grad_norm": 0.4913940452170003,
"learning_rate": 5e-06,
"loss": 0.6284,
"step": 540
},
{
"epoch": 1.76,
"grad_norm": 0.6170566214388444,
"learning_rate": 5e-06,
"loss": 0.6356,
"step": 550
},
{
"epoch": 1.792,
"grad_norm": 0.6377778383394684,
"learning_rate": 5e-06,
"loss": 0.6403,
"step": 560
},
{
"epoch": 1.8239999999999998,
"grad_norm": 0.5663340732106481,
"learning_rate": 5e-06,
"loss": 0.6449,
"step": 570
},
{
"epoch": 1.8559999999999999,
"grad_norm": 0.5736342081997541,
"learning_rate": 5e-06,
"loss": 0.6387,
"step": 580
},
{
"epoch": 1.888,
"grad_norm": 0.49306085144025097,
"learning_rate": 5e-06,
"loss": 0.6241,
"step": 590
},
{
"epoch": 1.92,
"grad_norm": 0.5775658004357387,
"learning_rate": 5e-06,
"loss": 0.6271,
"step": 600
},
{
"epoch": 1.952,
"grad_norm": 0.5577832354086514,
"learning_rate": 5e-06,
"loss": 0.634,
"step": 610
},
{
"epoch": 1.984,
"grad_norm": 0.5187494053729239,
"learning_rate": 5e-06,
"loss": 0.6388,
"step": 620
},
{
"epoch": 2.0,
"eval_loss": 0.6722739934921265,
"eval_runtime": 167.9944,
"eval_samples_per_second": 50.109,
"eval_steps_per_second": 0.393,
"step": 625
},
{
"epoch": 2.016,
"grad_norm": 0.7554552806950715,
"learning_rate": 5e-06,
"loss": 0.609,
"step": 630
},
{
"epoch": 2.048,
"grad_norm": 0.7015587384161144,
"learning_rate": 5e-06,
"loss": 0.5764,
"step": 640
},
{
"epoch": 2.08,
"grad_norm": 0.6710139474563853,
"learning_rate": 5e-06,
"loss": 0.583,
"step": 650
},
{
"epoch": 2.112,
"grad_norm": 0.5068349693062657,
"learning_rate": 5e-06,
"loss": 0.592,
"step": 660
},
{
"epoch": 2.144,
"grad_norm": 0.5399107301121943,
"learning_rate": 5e-06,
"loss": 0.5891,
"step": 670
},
{
"epoch": 2.176,
"grad_norm": 0.5827025174626241,
"learning_rate": 5e-06,
"loss": 0.5862,
"step": 680
},
{
"epoch": 2.208,
"grad_norm": 0.6588198083574598,
"learning_rate": 5e-06,
"loss": 0.581,
"step": 690
},
{
"epoch": 2.24,
"grad_norm": 0.5284463724994527,
"learning_rate": 5e-06,
"loss": 0.5895,
"step": 700
},
{
"epoch": 2.2720000000000002,
"grad_norm": 0.5554858601981788,
"learning_rate": 5e-06,
"loss": 0.582,
"step": 710
},
{
"epoch": 2.304,
"grad_norm": 0.5076164990838873,
"learning_rate": 5e-06,
"loss": 0.591,
"step": 720
},
{
"epoch": 2.336,
"grad_norm": 0.5467190742693613,
"learning_rate": 5e-06,
"loss": 0.5947,
"step": 730
},
{
"epoch": 2.368,
"grad_norm": 0.6169750992589544,
"learning_rate": 5e-06,
"loss": 0.5972,
"step": 740
},
{
"epoch": 2.4,
"grad_norm": 0.48044936244386016,
"learning_rate": 5e-06,
"loss": 0.5876,
"step": 750
},
{
"epoch": 2.432,
"grad_norm": 0.6032706176552372,
"learning_rate": 5e-06,
"loss": 0.587,
"step": 760
},
{
"epoch": 2.464,
"grad_norm": 0.6063975913360607,
"learning_rate": 5e-06,
"loss": 0.5925,
"step": 770
},
{
"epoch": 2.496,
"grad_norm": 0.6086757160459484,
"learning_rate": 5e-06,
"loss": 0.5866,
"step": 780
},
{
"epoch": 2.528,
"grad_norm": 0.5468098219597467,
"learning_rate": 5e-06,
"loss": 0.5833,
"step": 790
},
{
"epoch": 2.56,
"grad_norm": 0.5487999795278167,
"learning_rate": 5e-06,
"loss": 0.5833,
"step": 800
},
{
"epoch": 2.592,
"grad_norm": 0.549327963090587,
"learning_rate": 5e-06,
"loss": 0.5901,
"step": 810
},
{
"epoch": 2.624,
"grad_norm": 0.7150840433104585,
"learning_rate": 5e-06,
"loss": 0.5953,
"step": 820
},
{
"epoch": 2.656,
"grad_norm": 0.5334250102952837,
"learning_rate": 5e-06,
"loss": 0.5921,
"step": 830
},
{
"epoch": 2.6879999999999997,
"grad_norm": 0.4984756567264573,
"learning_rate": 5e-06,
"loss": 0.5909,
"step": 840
},
{
"epoch": 2.7199999999999998,
"grad_norm": 0.5260124201884769,
"learning_rate": 5e-06,
"loss": 0.5891,
"step": 850
},
{
"epoch": 2.752,
"grad_norm": 0.4768707573361185,
"learning_rate": 5e-06,
"loss": 0.5966,
"step": 860
},
{
"epoch": 2.784,
"grad_norm": 0.6307214695791044,
"learning_rate": 5e-06,
"loss": 0.5956,
"step": 870
},
{
"epoch": 2.816,
"grad_norm": 0.6369711933029217,
"learning_rate": 5e-06,
"loss": 0.5906,
"step": 880
},
{
"epoch": 2.848,
"grad_norm": 0.5331564084605998,
"learning_rate": 5e-06,
"loss": 0.5944,
"step": 890
},
{
"epoch": 2.88,
"grad_norm": 0.49279217252034724,
"learning_rate": 5e-06,
"loss": 0.588,
"step": 900
},
{
"epoch": 2.912,
"grad_norm": 0.570357514747828,
"learning_rate": 5e-06,
"loss": 0.5915,
"step": 910
},
{
"epoch": 2.944,
"grad_norm": 0.78678569178475,
"learning_rate": 5e-06,
"loss": 0.6007,
"step": 920
},
{
"epoch": 2.976,
"grad_norm": 0.561666413771167,
"learning_rate": 5e-06,
"loss": 0.5952,
"step": 930
},
{
"epoch": 2.9952,
"eval_loss": 0.6738138794898987,
"eval_runtime": 168.233,
"eval_samples_per_second": 50.038,
"eval_steps_per_second": 0.392,
"step": 936
},
{
"epoch": 2.9952,
"step": 936,
"total_flos": 1567416102420480.0,
"train_loss": 0.647635899293117,
"train_runtime": 28091.6471,
"train_samples_per_second": 17.079,
"train_steps_per_second": 0.033
}
],
"logging_steps": 10,
"max_steps": 936,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1567416102420480.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}