{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 7.0,
"eval_steps": 500,
"global_step": 1722,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.040774719673802244,
"grad_norm": 18.189022064208984,
"learning_rate": 9.000000000000001e-07,
"loss": 0.8515,
"step": 10
},
{
"epoch": 0.08154943934760449,
"grad_norm": 9.354066848754883,
"learning_rate": 1.9000000000000002e-06,
"loss": 0.905,
"step": 20
},
{
"epoch": 0.12232415902140673,
"grad_norm": 10.508342742919922,
"learning_rate": 2.9e-06,
"loss": 0.8879,
"step": 30
},
{
"epoch": 0.16309887869520898,
"grad_norm": 9.291099548339844,
"learning_rate": 3.900000000000001e-06,
"loss": 0.8905,
"step": 40
},
{
"epoch": 0.2038735983690112,
"grad_norm": 13.063141822814941,
"learning_rate": 4.9000000000000005e-06,
"loss": 0.8604,
"step": 50
},
{
"epoch": 0.24464831804281345,
"grad_norm": 7.156795024871826,
"learning_rate": 5.9e-06,
"loss": 0.7997,
"step": 60
},
{
"epoch": 0.2854230377166157,
"grad_norm": 12.09440803527832,
"learning_rate": 6.9e-06,
"loss": 0.7996,
"step": 70
},
{
"epoch": 0.32619775739041795,
"grad_norm": 14.44044303894043,
"learning_rate": 7.9e-06,
"loss": 0.8254,
"step": 80
},
{
"epoch": 0.3669724770642202,
"grad_norm": 12.786622047424316,
"learning_rate": 8.900000000000001e-06,
"loss": 0.8999,
"step": 90
},
{
"epoch": 0.4077471967380224,
"grad_norm": 14.936609268188477,
"learning_rate": 9.9e-06,
"loss": 0.7893,
"step": 100
},
{
"epoch": 0.44852191641182465,
"grad_norm": 12.025712013244629,
"learning_rate": 9.961864406779662e-06,
"loss": 0.8599,
"step": 110
},
{
"epoch": 0.4892966360856269,
"grad_norm": 18.530860900878906,
"learning_rate": 9.91949152542373e-06,
"loss": 0.7895,
"step": 120
},
{
"epoch": 0.5300713557594292,
"grad_norm": 10.44506549835205,
"learning_rate": 9.877118644067798e-06,
"loss": 0.8066,
"step": 130
},
{
"epoch": 0.5708460754332314,
"grad_norm": 18.924278259277344,
"learning_rate": 9.834745762711865e-06,
"loss": 0.8618,
"step": 140
},
{
"epoch": 0.6116207951070336,
"grad_norm": 18.275020599365234,
"learning_rate": 9.792372881355933e-06,
"loss": 0.8419,
"step": 150
},
{
"epoch": 0.6523955147808359,
"grad_norm": 11.579925537109375,
"learning_rate": 9.75e-06,
"loss": 0.8823,
"step": 160
},
{
"epoch": 0.6931702344546381,
"grad_norm": 13.954208374023438,
"learning_rate": 9.707627118644068e-06,
"loss": 0.8893,
"step": 170
},
{
"epoch": 0.7339449541284404,
"grad_norm": 11.016424179077148,
"learning_rate": 9.665254237288136e-06,
"loss": 0.9281,
"step": 180
},
{
"epoch": 0.7747196738022426,
"grad_norm": 9.621957778930664,
"learning_rate": 9.622881355932205e-06,
"loss": 0.8009,
"step": 190
},
{
"epoch": 0.8154943934760448,
"grad_norm": 17.310779571533203,
"learning_rate": 9.580508474576273e-06,
"loss": 0.8456,
"step": 200
},
{
"epoch": 0.8562691131498471,
"grad_norm": 10.48460578918457,
"learning_rate": 9.53813559322034e-06,
"loss": 0.7,
"step": 210
},
{
"epoch": 0.8970438328236493,
"grad_norm": 10.495179176330566,
"learning_rate": 9.495762711864408e-06,
"loss": 0.6497,
"step": 220
},
{
"epoch": 0.9378185524974516,
"grad_norm": 19.09280776977539,
"learning_rate": 9.453389830508474e-06,
"loss": 0.8303,
"step": 230
},
{
"epoch": 0.9785932721712538,
"grad_norm": 18.26506233215332,
"learning_rate": 9.411016949152543e-06,
"loss": 0.736,
"step": 240
},
{
"epoch": 1.0,
"eval_general_loss": 0.7072561979293823,
"eval_general_runtime": 258.0411,
"eval_general_samples_per_second": 3.546,
"eval_general_steps_per_second": 0.887,
"step": 246
},
{
"epoch": 1.0,
"eval_code_loss": 0.8012194037437439,
"eval_code_runtime": 301.035,
"eval_code_samples_per_second": 3.049,
"eval_code_steps_per_second": 0.764,
"step": 246
},
{
"epoch": 1.0,
"eval_stem_loss": 0.7626175284385681,
"eval_stem_runtime": 254.1723,
"eval_stem_samples_per_second": 3.596,
"eval_stem_steps_per_second": 0.901,
"step": 246
},
{
"epoch": 1.016309887869521,
"grad_norm": 14.707352638244629,
"learning_rate": 9.368644067796611e-06,
"loss": 0.6495,
"step": 250
},
{
"epoch": 1.0570846075433231,
"grad_norm": 14.996277809143066,
"learning_rate": 9.32627118644068e-06,
"loss": 0.6753,
"step": 260
},
{
"epoch": 1.0978593272171253,
"grad_norm": 16.44031524658203,
"learning_rate": 9.283898305084746e-06,
"loss": 0.6465,
"step": 270
},
{
"epoch": 1.1386340468909277,
"grad_norm": 12.770790100097656,
"learning_rate": 9.241525423728814e-06,
"loss": 0.7006,
"step": 280
},
{
"epoch": 1.17940876656473,
"grad_norm": 9.644815444946289,
"learning_rate": 9.199152542372882e-06,
"loss": 0.7345,
"step": 290
},
{
"epoch": 1.2201834862385321,
"grad_norm": 12.410354614257812,
"learning_rate": 9.15677966101695e-06,
"loss": 0.5812,
"step": 300
},
{
"epoch": 1.2609582059123343,
"grad_norm": 14.592584609985352,
"learning_rate": 9.114406779661017e-06,
"loss": 0.7479,
"step": 310
},
{
"epoch": 1.3017329255861365,
"grad_norm": 8.983531951904297,
"learning_rate": 9.072033898305086e-06,
"loss": 0.6735,
"step": 320
},
{
"epoch": 1.3425076452599387,
"grad_norm": 10.594444274902344,
"learning_rate": 9.029661016949152e-06,
"loss": 0.6125,
"step": 330
},
{
"epoch": 1.3832823649337411,
"grad_norm": 8.827781677246094,
"learning_rate": 8.98728813559322e-06,
"loss": 0.5886,
"step": 340
},
{
"epoch": 1.4240570846075433,
"grad_norm": 8.48524284362793,
"learning_rate": 8.944915254237289e-06,
"loss": 0.6677,
"step": 350
},
{
"epoch": 1.4648318042813455,
"grad_norm": 14.892068862915039,
"learning_rate": 8.902542372881357e-06,
"loss": 0.6921,
"step": 360
},
{
"epoch": 1.505606523955148,
"grad_norm": 13.356966018676758,
"learning_rate": 8.860169491525424e-06,
"loss": 0.7095,
"step": 370
},
{
"epoch": 1.5463812436289501,
"grad_norm": 10.22076416015625,
"learning_rate": 8.817796610169492e-06,
"loss": 0.6031,
"step": 380
},
{
"epoch": 1.5871559633027523,
"grad_norm": 9.876967430114746,
"learning_rate": 8.77542372881356e-06,
"loss": 0.5818,
"step": 390
},
{
"epoch": 1.6279306829765545,
"grad_norm": 9.625609397888184,
"learning_rate": 8.733050847457629e-06,
"loss": 0.5913,
"step": 400
},
{
"epoch": 1.6687054026503567,
"grad_norm": 8.925414085388184,
"learning_rate": 8.690677966101695e-06,
"loss": 0.578,
"step": 410
},
{
"epoch": 1.709480122324159,
"grad_norm": 7.93916130065918,
"learning_rate": 8.648305084745763e-06,
"loss": 0.6955,
"step": 420
},
{
"epoch": 1.750254841997961,
"grad_norm": 7.942169666290283,
"learning_rate": 8.60593220338983e-06,
"loss": 0.5284,
"step": 430
},
{
"epoch": 1.7910295616717635,
"grad_norm": 9.36421012878418,
"learning_rate": 8.563559322033898e-06,
"loss": 0.7499,
"step": 440
},
{
"epoch": 1.8318042813455657,
"grad_norm": 12.293156623840332,
"learning_rate": 8.521186440677967e-06,
"loss": 0.6923,
"step": 450
},
{
"epoch": 1.8725790010193681,
"grad_norm": 9.337946891784668,
"learning_rate": 8.478813559322035e-06,
"loss": 0.6138,
"step": 460
},
{
"epoch": 1.9133537206931703,
"grad_norm": 10.548377990722656,
"learning_rate": 8.436440677966102e-06,
"loss": 0.6029,
"step": 470
},
{
"epoch": 1.9541284403669725,
"grad_norm": 17.341978073120117,
"learning_rate": 8.39406779661017e-06,
"loss": 0.624,
"step": 480
},
{
"epoch": 1.9949031600407747,
"grad_norm": 13.546788215637207,
"learning_rate": 8.351694915254238e-06,
"loss": 0.6197,
"step": 490
},
{
"epoch": 2.0,
"eval_general_loss": 0.6272298097610474,
"eval_general_runtime": 255.3018,
"eval_general_samples_per_second": 3.584,
"eval_general_steps_per_second": 0.897,
"step": 492
},
{
"epoch": 2.0,
"eval_code_loss": 0.69377201795578,
"eval_code_runtime": 298.208,
"eval_code_samples_per_second": 3.078,
"eval_code_steps_per_second": 0.771,
"step": 492
},
{
"epoch": 2.0,
"eval_stem_loss": 0.7002437710762024,
"eval_stem_runtime": 251.7134,
"eval_stem_samples_per_second": 3.631,
"eval_stem_steps_per_second": 0.91,
"step": 492
},
{
"epoch": 2.032619775739042,
"grad_norm": 8.038460731506348,
"learning_rate": 8.309322033898306e-06,
"loss": 0.4989,
"step": 500
},
{
"epoch": 2.073394495412844,
"grad_norm": 11.866241455078125,
"learning_rate": 8.266949152542375e-06,
"loss": 0.6914,
"step": 510
},
{
"epoch": 2.1141692150866462,
"grad_norm": 6.622210502624512,
"learning_rate": 8.224576271186441e-06,
"loss": 0.5043,
"step": 520
},
{
"epoch": 2.1549439347604484,
"grad_norm": 11.639506340026855,
"learning_rate": 8.182203389830508e-06,
"loss": 0.6054,
"step": 530
},
{
"epoch": 2.1957186544342506,
"grad_norm": 8.884720802307129,
"learning_rate": 8.139830508474576e-06,
"loss": 0.5524,
"step": 540
},
{
"epoch": 2.236493374108053,
"grad_norm": 12.228666305541992,
"learning_rate": 8.097457627118644e-06,
"loss": 0.6011,
"step": 550
},
{
"epoch": 2.2772680937818555,
"grad_norm": 10.770685195922852,
"learning_rate": 8.055084745762713e-06,
"loss": 0.5481,
"step": 560
},
{
"epoch": 2.3180428134556577,
"grad_norm": 12.777567863464355,
"learning_rate": 8.012711864406781e-06,
"loss": 0.496,
"step": 570
},
{
"epoch": 2.35881753312946,
"grad_norm": 12.713486671447754,
"learning_rate": 7.970338983050848e-06,
"loss": 0.4764,
"step": 580
},
{
"epoch": 2.399592252803262,
"grad_norm": 15.633949279785156,
"learning_rate": 7.927966101694916e-06,
"loss": 0.6179,
"step": 590
},
{
"epoch": 2.4403669724770642,
"grad_norm": 10.710966110229492,
"learning_rate": 7.885593220338984e-06,
"loss": 0.5726,
"step": 600
},
{
"epoch": 2.4811416921508664,
"grad_norm": 10.519601821899414,
"learning_rate": 7.843220338983052e-06,
"loss": 0.4988,
"step": 610
},
{
"epoch": 2.5219164118246686,
"grad_norm": 7.1472392082214355,
"learning_rate": 7.800847457627119e-06,
"loss": 0.5648,
"step": 620
},
{
"epoch": 2.562691131498471,
"grad_norm": 13.704323768615723,
"learning_rate": 7.758474576271187e-06,
"loss": 0.6488,
"step": 630
},
{
"epoch": 2.603465851172273,
"grad_norm": 8.187666893005371,
"learning_rate": 7.716101694915254e-06,
"loss": 0.5047,
"step": 640
},
{
"epoch": 2.6442405708460752,
"grad_norm": 8.175298690795898,
"learning_rate": 7.673728813559322e-06,
"loss": 0.5948,
"step": 650
},
{
"epoch": 2.6850152905198774,
"grad_norm": 11.140478134155273,
"learning_rate": 7.63135593220339e-06,
"loss": 0.5212,
"step": 660
},
{
"epoch": 2.72579001019368,
"grad_norm": 10.118830680847168,
"learning_rate": 7.588983050847458e-06,
"loss": 0.506,
"step": 670
},
{
"epoch": 2.7665647298674823,
"grad_norm": 12.593578338623047,
"learning_rate": 7.546610169491526e-06,
"loss": 0.5584,
"step": 680
},
{
"epoch": 2.8073394495412844,
"grad_norm": 8.544402122497559,
"learning_rate": 7.504237288135594e-06,
"loss": 0.5202,
"step": 690
},
{
"epoch": 2.8481141692150866,
"grad_norm": 11.403071403503418,
"learning_rate": 7.461864406779662e-06,
"loss": 0.6049,
"step": 700
},
{
"epoch": 2.888888888888889,
"grad_norm": 12.620019912719727,
"learning_rate": 7.41949152542373e-06,
"loss": 0.5484,
"step": 710
},
{
"epoch": 2.929663608562691,
"grad_norm": 8.751862525939941,
"learning_rate": 7.377118644067798e-06,
"loss": 0.5454,
"step": 720
},
{
"epoch": 2.9704383282364932,
"grad_norm": 10.521064758300781,
"learning_rate": 7.334745762711864e-06,
"loss": 0.5242,
"step": 730
},
{
"epoch": 3.0,
"eval_general_loss": 0.5933007597923279,
"eval_general_runtime": 255.4722,
"eval_general_samples_per_second": 3.582,
"eval_general_steps_per_second": 0.896,
"step": 738
},
{
"epoch": 3.0,
"eval_code_loss": 0.6812524795532227,
"eval_code_runtime": 299.2052,
"eval_code_samples_per_second": 3.068,
"eval_code_steps_per_second": 0.769,
"step": 738
},
{
"epoch": 3.0,
"eval_stem_loss": 0.6883611679077148,
"eval_stem_runtime": 251.705,
"eval_stem_samples_per_second": 3.631,
"eval_stem_steps_per_second": 0.91,
"step": 738
},
{
"epoch": 3.0081549439347604,
"grad_norm": 5.164834022521973,
"learning_rate": 7.292372881355933e-06,
"loss": 0.5569,
"step": 740
},
{
"epoch": 3.0489296636085625,
"grad_norm": 8.926407814025879,
"learning_rate": 7.25e-06,
"loss": 0.512,
"step": 750
},
{
"epoch": 3.0897043832823647,
"grad_norm": 6.960042953491211,
"learning_rate": 7.207627118644068e-06,
"loss": 0.4701,
"step": 760
},
{
"epoch": 3.1304791029561674,
"grad_norm": 12.42642879486084,
"learning_rate": 7.165254237288136e-06,
"loss": 0.568,
"step": 770
},
{
"epoch": 3.1712538226299696,
"grad_norm": 13.820547103881836,
"learning_rate": 7.122881355932204e-06,
"loss": 0.5463,
"step": 780
},
{
"epoch": 3.2120285423037718,
"grad_norm": 10.348419189453125,
"learning_rate": 7.080508474576272e-06,
"loss": 0.5706,
"step": 790
},
{
"epoch": 3.252803261977574,
"grad_norm": 9.376842498779297,
"learning_rate": 7.03813559322034e-06,
"loss": 0.5124,
"step": 800
},
{
"epoch": 3.293577981651376,
"grad_norm": 9.71473217010498,
"learning_rate": 6.995762711864408e-06,
"loss": 0.4291,
"step": 810
},
{
"epoch": 3.3343527013251784,
"grad_norm": 11.891929626464844,
"learning_rate": 6.953389830508475e-06,
"loss": 0.5006,
"step": 820
},
{
"epoch": 3.3751274209989806,
"grad_norm": 7.028322219848633,
"learning_rate": 6.911016949152542e-06,
"loss": 0.4249,
"step": 830
},
{
"epoch": 3.4159021406727827,
"grad_norm": 6.9452009201049805,
"learning_rate": 6.86864406779661e-06,
"loss": 0.4576,
"step": 840
},
{
"epoch": 3.456676860346585,
"grad_norm": 9.390569686889648,
"learning_rate": 6.826271186440679e-06,
"loss": 0.4251,
"step": 850
},
{
"epoch": 3.497451580020387,
"grad_norm": 8.582491874694824,
"learning_rate": 6.783898305084746e-06,
"loss": 0.4872,
"step": 860
},
{
"epoch": 3.5382262996941893,
"grad_norm": 11.553034782409668,
"learning_rate": 6.741525423728814e-06,
"loss": 0.4284,
"step": 870
},
{
"epoch": 3.579001019367992,
"grad_norm": 13.446784019470215,
"learning_rate": 6.699152542372882e-06,
"loss": 0.4923,
"step": 880
},
{
"epoch": 3.619775739041794,
"grad_norm": 8.266268730163574,
"learning_rate": 6.65677966101695e-06,
"loss": 0.4186,
"step": 890
},
{
"epoch": 3.6605504587155964,
"grad_norm": 20.191940307617188,
"learning_rate": 6.614406779661018e-06,
"loss": 0.5954,
"step": 900
},
{
"epoch": 3.7013251783893986,
"grad_norm": 8.65824031829834,
"learning_rate": 6.572033898305086e-06,
"loss": 0.5261,
"step": 910
},
{
"epoch": 3.7420998980632008,
"grad_norm": 13.471404075622559,
"learning_rate": 6.5296610169491525e-06,
"loss": 0.4982,
"step": 920
},
{
"epoch": 3.782874617737003,
"grad_norm": 17.626903533935547,
"learning_rate": 6.487288135593221e-06,
"loss": 0.4446,
"step": 930
},
{
"epoch": 3.823649337410805,
"grad_norm": 18.55869483947754,
"learning_rate": 6.444915254237288e-06,
"loss": 0.4997,
"step": 940
},
{
"epoch": 3.864424057084608,
"grad_norm": 12.997821807861328,
"learning_rate": 6.4025423728813565e-06,
"loss": 0.5789,
"step": 950
},
{
"epoch": 3.90519877675841,
"grad_norm": 16.951148986816406,
"learning_rate": 6.360169491525424e-06,
"loss": 0.5246,
"step": 960
},
{
"epoch": 3.945973496432212,
"grad_norm": 12.54166030883789,
"learning_rate": 6.317796610169492e-06,
"loss": 0.4787,
"step": 970
},
{
"epoch": 3.9867482161060144,
"grad_norm": 13.518985748291016,
"learning_rate": 6.27542372881356e-06,
"loss": 0.5098,
"step": 980
},
{
"epoch": 4.0,
"eval_general_loss": 0.5695427060127258,
"eval_general_runtime": 255.8406,
"eval_general_samples_per_second": 3.576,
"eval_general_steps_per_second": 0.895,
"step": 984
},
{
"epoch": 4.0,
"eval_code_loss": 0.6757102608680725,
"eval_code_runtime": 297.9071,
"eval_code_samples_per_second": 3.081,
"eval_code_steps_per_second": 0.772,
"step": 984
},
{
"epoch": 4.0,
"eval_stem_loss": 0.6891591548919678,
"eval_stem_runtime": 251.3672,
"eval_stem_samples_per_second": 3.636,
"eval_stem_steps_per_second": 0.911,
"step": 984
},
{
"epoch": 4.0244648318042815,
"grad_norm": 10.329503059387207,
"learning_rate": 6.233050847457628e-06,
"loss": 0.4314,
"step": 990
},
{
"epoch": 4.065239551478084,
"grad_norm": 13.447497367858887,
"learning_rate": 6.190677966101695e-06,
"loss": 0.4775,
"step": 1000
},
{
"epoch": 4.106014271151886,
"grad_norm": 6.44377326965332,
"learning_rate": 6.148305084745764e-06,
"loss": 0.4268,
"step": 1010
},
{
"epoch": 4.146788990825688,
"grad_norm": 8.970619201660156,
"learning_rate": 6.10593220338983e-06,
"loss": 0.4198,
"step": 1020
},
{
"epoch": 4.18756371049949,
"grad_norm": 5.479137420654297,
"learning_rate": 6.0635593220338986e-06,
"loss": 0.4173,
"step": 1030
},
{
"epoch": 4.2283384301732925,
"grad_norm": 8.389227867126465,
"learning_rate": 6.021186440677966e-06,
"loss": 0.4917,
"step": 1040
},
{
"epoch": 4.269113149847095,
"grad_norm": 6.150132179260254,
"learning_rate": 5.978813559322034e-06,
"loss": 0.4613,
"step": 1050
},
{
"epoch": 4.309887869520897,
"grad_norm": 11.207158088684082,
"learning_rate": 5.936440677966102e-06,
"loss": 0.3489,
"step": 1060
},
{
"epoch": 4.350662589194699,
"grad_norm": 7.1269145011901855,
"learning_rate": 5.89406779661017e-06,
"loss": 0.4335,
"step": 1070
},
{
"epoch": 4.391437308868501,
"grad_norm": 6.830160140991211,
"learning_rate": 5.851694915254238e-06,
"loss": 0.4218,
"step": 1080
},
{
"epoch": 4.4322120285423035,
"grad_norm": 11.747178077697754,
"learning_rate": 5.809322033898306e-06,
"loss": 0.5124,
"step": 1090
},
{
"epoch": 4.472986748216106,
"grad_norm": 10.610580444335938,
"learning_rate": 5.766949152542374e-06,
"loss": 0.3823,
"step": 1100
},
{
"epoch": 4.513761467889909,
"grad_norm": 10.419934272766113,
"learning_rate": 5.7245762711864415e-06,
"loss": 0.4551,
"step": 1110
},
{
"epoch": 4.554536187563711,
"grad_norm": 9.977579116821289,
"learning_rate": 5.682203389830508e-06,
"loss": 0.3395,
"step": 1120
},
{
"epoch": 4.595310907237513,
"grad_norm": 22.077423095703125,
"learning_rate": 5.639830508474576e-06,
"loss": 0.5031,
"step": 1130
},
{
"epoch": 4.636085626911315,
"grad_norm": 8.453546524047852,
"learning_rate": 5.597457627118645e-06,
"loss": 0.3867,
"step": 1140
},
{
"epoch": 4.6768603465851175,
"grad_norm": 11.514775276184082,
"learning_rate": 5.555084745762712e-06,
"loss": 0.3771,
"step": 1150
},
{
"epoch": 4.71763506625892,
"grad_norm": 9.698599815368652,
"learning_rate": 5.51271186440678e-06,
"loss": 0.5053,
"step": 1160
},
{
"epoch": 4.758409785932722,
"grad_norm": 8.122666358947754,
"learning_rate": 5.470338983050848e-06,
"loss": 0.4136,
"step": 1170
},
{
"epoch": 4.799184505606524,
"grad_norm": 16.033802032470703,
"learning_rate": 5.427966101694916e-06,
"loss": 0.4073,
"step": 1180
},
{
"epoch": 4.839959225280326,
"grad_norm": 12.248979568481445,
"learning_rate": 5.3855932203389836e-06,
"loss": 0.4709,
"step": 1190
},
{
"epoch": 4.8807339449541285,
"grad_norm": 12.649367332458496,
"learning_rate": 5.343220338983052e-06,
"loss": 0.5022,
"step": 1200
},
{
"epoch": 4.921508664627931,
"grad_norm": 15.413296699523926,
"learning_rate": 5.300847457627119e-06,
"loss": 0.4231,
"step": 1210
},
{
"epoch": 4.962283384301733,
"grad_norm": 14.376687049865723,
"learning_rate": 5.258474576271187e-06,
"loss": 0.4172,
"step": 1220
},
{
"epoch": 5.0,
"grad_norm": 73.38561248779297,
"learning_rate": 5.216101694915254e-06,
"loss": 0.5843,
"step": 1230
},
{
"epoch": 5.0,
"eval_general_loss": 0.5886819958686829,
"eval_general_runtime": 256.5693,
"eval_general_samples_per_second": 3.566,
"eval_general_steps_per_second": 0.893,
"step": 1230
},
{
"epoch": 5.0,
"eval_code_loss": 0.667267918586731,
"eval_code_runtime": 298.4051,
"eval_code_samples_per_second": 3.076,
"eval_code_steps_per_second": 0.771,
"step": 1230
},
{
"epoch": 5.0,
"eval_stem_loss": 0.7440467476844788,
"eval_stem_runtime": 252.1598,
"eval_stem_samples_per_second": 3.625,
"eval_stem_steps_per_second": 0.908,
"step": 1230
},
{
"epoch": 5.040774719673802,
"grad_norm": 11.078204154968262,
"learning_rate": 5.1737288135593225e-06,
"loss": 0.3649,
"step": 1240
},
{
"epoch": 5.081549439347604,
"grad_norm": 17.9708251953125,
"learning_rate": 5.13135593220339e-06,
"loss": 0.3542,
"step": 1250
},
{
"epoch": 5.122324159021407,
"grad_norm": 23.31520652770996,
"learning_rate": 5.088983050847458e-06,
"loss": 0.4107,
"step": 1260
},
{
"epoch": 5.163098878695209,
"grad_norm": 11.54787826538086,
"learning_rate": 5.046610169491526e-06,
"loss": 0.3702,
"step": 1270
},
{
"epoch": 5.203873598369011,
"grad_norm": 14.472805976867676,
"learning_rate": 5.004237288135594e-06,
"loss": 0.4591,
"step": 1280
},
{
"epoch": 5.244648318042813,
"grad_norm": 8.058154106140137,
"learning_rate": 4.961864406779661e-06,
"loss": 0.3983,
"step": 1290
},
{
"epoch": 5.285423037716615,
"grad_norm": 10.769429206848145,
"learning_rate": 4.919491525423729e-06,
"loss": 0.3268,
"step": 1300
},
{
"epoch": 5.326197757390418,
"grad_norm": 18.2336368560791,
"learning_rate": 4.877118644067797e-06,
"loss": 0.2733,
"step": 1310
},
{
"epoch": 5.36697247706422,
"grad_norm": 9.624357223510742,
"learning_rate": 4.8347457627118645e-06,
"loss": 0.3423,
"step": 1320
},
{
"epoch": 5.407747196738022,
"grad_norm": 20.92825698852539,
"learning_rate": 4.792372881355933e-06,
"loss": 0.4394,
"step": 1330
},
{
"epoch": 5.448521916411825,
"grad_norm": 20.59296226501465,
"learning_rate": 4.75e-06,
"loss": 0.3442,
"step": 1340
},
{
"epoch": 5.489296636085627,
"grad_norm": 11.993790626525879,
"learning_rate": 4.707627118644068e-06,
"loss": 0.3717,
"step": 1350
},
{
"epoch": 5.530071355759429,
"grad_norm": 14.13767147064209,
"learning_rate": 4.665254237288136e-06,
"loss": 0.465,
"step": 1360
},
{
"epoch": 5.570846075433232,
"grad_norm": 17.664396286010742,
"learning_rate": 4.622881355932204e-06,
"loss": 0.3928,
"step": 1370
},
{
"epoch": 5.611620795107034,
"grad_norm": 8.398447036743164,
"learning_rate": 4.580508474576272e-06,
"loss": 0.3448,
"step": 1380
},
{
"epoch": 5.652395514780836,
"grad_norm": 15.034809112548828,
"learning_rate": 4.538135593220339e-06,
"loss": 0.3503,
"step": 1390
},
{
"epoch": 5.693170234454638,
"grad_norm": 13.326154708862305,
"learning_rate": 4.4957627118644075e-06,
"loss": 0.4093,
"step": 1400
},
{
"epoch": 5.73394495412844,
"grad_norm": 35.410621643066406,
"learning_rate": 4.453389830508475e-06,
"loss": 0.4609,
"step": 1410
},
{
"epoch": 5.774719673802243,
"grad_norm": 10.093385696411133,
"learning_rate": 4.411016949152543e-06,
"loss": 0.3377,
"step": 1420
},
{
"epoch": 5.815494393476045,
"grad_norm": 4.060236930847168,
"learning_rate": 4.368644067796611e-06,
"loss": 0.3625,
"step": 1430
},
{
"epoch": 5.856269113149847,
"grad_norm": 12.651063919067383,
"learning_rate": 4.326271186440678e-06,
"loss": 0.3744,
"step": 1440
},
{
"epoch": 5.897043832823649,
"grad_norm": 17.561542510986328,
"learning_rate": 4.283898305084746e-06,
"loss": 0.3624,
"step": 1450
},
{
"epoch": 5.937818552497451,
"grad_norm": 17.572458267211914,
"learning_rate": 4.241525423728814e-06,
"loss": 0.4267,
"step": 1460
},
{
"epoch": 5.978593272171254,
"grad_norm": 15.854689598083496,
"learning_rate": 4.199152542372882e-06,
"loss": 0.3535,
"step": 1470
},
{
"epoch": 6.0,
"eval_general_loss": 0.586328387260437,
"eval_general_runtime": 256.6674,
"eval_general_samples_per_second": 3.565,
"eval_general_steps_per_second": 0.892,
"step": 1476
},
{
"epoch": 6.0,
"eval_code_loss": 0.6865141987800598,
"eval_code_runtime": 299.8173,
"eval_code_samples_per_second": 3.062,
"eval_code_steps_per_second": 0.767,
"step": 1476
},
{
"epoch": 6.0,
"eval_stem_loss": 0.7701802253723145,
"eval_stem_runtime": 253.0522,
"eval_stem_samples_per_second": 3.612,
"eval_stem_steps_per_second": 0.905,
"step": 1476
},
{
"epoch": 6.016309887869521,
"grad_norm": 21.478351593017578,
"learning_rate": 4.1567796610169495e-06,
"loss": 0.3105,
"step": 1480
},
{
"epoch": 6.057084607543323,
"grad_norm": 14.138855934143066,
"learning_rate": 4.114406779661017e-06,
"loss": 0.2904,
"step": 1490
},
{
"epoch": 6.097859327217125,
"grad_norm": 4.692451000213623,
"learning_rate": 4.072033898305085e-06,
"loss": 0.2857,
"step": 1500
},
{
"epoch": 6.138634046890927,
"grad_norm": 12.039435386657715,
"learning_rate": 4.029661016949153e-06,
"loss": 0.2959,
"step": 1510
},
{
"epoch": 6.1794087665647295,
"grad_norm": 15.02013111114502,
"learning_rate": 3.987288135593221e-06,
"loss": 0.2821,
"step": 1520
},
{
"epoch": 6.220183486238533,
"grad_norm": 9.34999942779541,
"learning_rate": 3.9449152542372884e-06,
"loss": 0.3188,
"step": 1530
},
{
"epoch": 6.260958205912335,
"grad_norm": 6.76429557800293,
"learning_rate": 3.902542372881356e-06,
"loss": 0.3847,
"step": 1540
},
{
"epoch": 6.301732925586137,
"grad_norm": 18.529054641723633,
"learning_rate": 3.860169491525424e-06,
"loss": 0.3834,
"step": 1550
},
{
"epoch": 6.342507645259939,
"grad_norm": 18.2543888092041,
"learning_rate": 3.817796610169492e-06,
"loss": 0.3482,
"step": 1560
},
{
"epoch": 6.383282364933741,
"grad_norm": 13.644851684570312,
"learning_rate": 3.77542372881356e-06,
"loss": 0.3079,
"step": 1570
},
{
"epoch": 6.4240570846075435,
"grad_norm": 15.071892738342285,
"learning_rate": 3.7330508474576277e-06,
"loss": 0.3469,
"step": 1580
},
{
"epoch": 6.464831804281346,
"grad_norm": 19.419057846069336,
"learning_rate": 3.690677966101695e-06,
"loss": 0.2463,
"step": 1590
},
{
"epoch": 6.505606523955148,
"grad_norm": 7.33676290512085,
"learning_rate": 3.648305084745763e-06,
"loss": 0.3431,
"step": 1600
},
{
"epoch": 6.54638124362895,
"grad_norm": 9.915693283081055,
"learning_rate": 3.605932203389831e-06,
"loss": 0.3703,
"step": 1610
},
{
"epoch": 6.587155963302752,
"grad_norm": 21.167028427124023,
"learning_rate": 3.5635593220338988e-06,
"loss": 0.3688,
"step": 1620
},
{
"epoch": 6.6279306829765545,
"grad_norm": 9.088415145874023,
"learning_rate": 3.5211864406779667e-06,
"loss": 0.3394,
"step": 1630
},
{
"epoch": 6.668705402650357,
"grad_norm": 22.053159713745117,
"learning_rate": 3.478813559322034e-06,
"loss": 0.3261,
"step": 1640
},
{
"epoch": 6.709480122324159,
"grad_norm": 12.980785369873047,
"learning_rate": 3.436440677966102e-06,
"loss": 0.3494,
"step": 1650
},
{
"epoch": 6.750254841997961,
"grad_norm": 17.203041076660156,
"learning_rate": 3.39406779661017e-06,
"loss": 0.3862,
"step": 1660
},
{
"epoch": 6.791029561671763,
"grad_norm": 14.678689002990723,
"learning_rate": 3.3516949152542377e-06,
"loss": 0.3752,
"step": 1670
},
{
"epoch": 6.8318042813455655,
"grad_norm": 9.484582901000977,
"learning_rate": 3.309322033898305e-06,
"loss": 0.2588,
"step": 1680
},
{
"epoch": 6.872579001019368,
"grad_norm": 6.880331516265869,
"learning_rate": 3.266949152542373e-06,
"loss": 0.2594,
"step": 1690
},
{
"epoch": 6.91335372069317,
"grad_norm": 15.112305641174316,
"learning_rate": 3.224576271186441e-06,
"loss": 0.3514,
"step": 1700
},
{
"epoch": 6.954128440366972,
"grad_norm": 12.322484016418457,
"learning_rate": 3.1822033898305087e-06,
"loss": 0.2714,
"step": 1710
},
{
"epoch": 6.994903160040774,
"grad_norm": 13.882013320922852,
"learning_rate": 3.1398305084745766e-06,
"loss": 0.2907,
"step": 1720
},
{
"epoch": 7.0,
"eval_general_loss": 0.6201872229576111,
"eval_general_runtime": 255.9089,
"eval_general_samples_per_second": 3.575,
"eval_general_steps_per_second": 0.895,
"step": 1722
},
{
"epoch": 7.0,
"eval_code_loss": 0.7187511920928955,
"eval_code_runtime": 298.4259,
"eval_code_samples_per_second": 3.076,
"eval_code_steps_per_second": 0.771,
"step": 1722
},
{
"epoch": 7.0,
"eval_stem_loss": 0.8482697606086731,
"eval_stem_runtime": 251.9587,
"eval_stem_samples_per_second": 3.628,
"eval_stem_steps_per_second": 0.909,
"step": 1722
}
],
"logging_steps": 10,
"max_steps": 2460,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.7744572785853727e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}