{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 738,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.040774719673802244,
"grad_norm": 18.189022064208984,
"learning_rate": 9.000000000000001e-07,
"loss": 0.8515,
"step": 10
},
{
"epoch": 0.08154943934760449,
"grad_norm": 9.354066848754883,
"learning_rate": 1.9000000000000002e-06,
"loss": 0.905,
"step": 20
},
{
"epoch": 0.12232415902140673,
"grad_norm": 10.508342742919922,
"learning_rate": 2.9e-06,
"loss": 0.8879,
"step": 30
},
{
"epoch": 0.16309887869520898,
"grad_norm": 9.291099548339844,
"learning_rate": 3.900000000000001e-06,
"loss": 0.8905,
"step": 40
},
{
"epoch": 0.2038735983690112,
"grad_norm": 13.063141822814941,
"learning_rate": 4.9000000000000005e-06,
"loss": 0.8604,
"step": 50
},
{
"epoch": 0.24464831804281345,
"grad_norm": 7.156795024871826,
"learning_rate": 5.9e-06,
"loss": 0.7997,
"step": 60
},
{
"epoch": 0.2854230377166157,
"grad_norm": 12.09440803527832,
"learning_rate": 6.9e-06,
"loss": 0.7996,
"step": 70
},
{
"epoch": 0.32619775739041795,
"grad_norm": 14.44044303894043,
"learning_rate": 7.9e-06,
"loss": 0.8254,
"step": 80
},
{
"epoch": 0.3669724770642202,
"grad_norm": 12.786622047424316,
"learning_rate": 8.900000000000001e-06,
"loss": 0.8999,
"step": 90
},
{
"epoch": 0.4077471967380224,
"grad_norm": 14.936609268188477,
"learning_rate": 9.9e-06,
"loss": 0.7893,
"step": 100
},
{
"epoch": 0.44852191641182465,
"grad_norm": 12.025712013244629,
"learning_rate": 9.961864406779662e-06,
"loss": 0.8599,
"step": 110
},
{
"epoch": 0.4892966360856269,
"grad_norm": 18.530860900878906,
"learning_rate": 9.91949152542373e-06,
"loss": 0.7895,
"step": 120
},
{
"epoch": 0.5300713557594292,
"grad_norm": 10.44506549835205,
"learning_rate": 9.877118644067798e-06,
"loss": 0.8066,
"step": 130
},
{
"epoch": 0.5708460754332314,
"grad_norm": 18.924278259277344,
"learning_rate": 9.834745762711865e-06,
"loss": 0.8618,
"step": 140
},
{
"epoch": 0.6116207951070336,
"grad_norm": 18.275020599365234,
"learning_rate": 9.792372881355933e-06,
"loss": 0.8419,
"step": 150
},
{
"epoch": 0.6523955147808359,
"grad_norm": 11.579925537109375,
"learning_rate": 9.75e-06,
"loss": 0.8823,
"step": 160
},
{
"epoch": 0.6931702344546381,
"grad_norm": 13.954208374023438,
"learning_rate": 9.707627118644068e-06,
"loss": 0.8893,
"step": 170
},
{
"epoch": 0.7339449541284404,
"grad_norm": 11.016424179077148,
"learning_rate": 9.665254237288136e-06,
"loss": 0.9281,
"step": 180
},
{
"epoch": 0.7747196738022426,
"grad_norm": 9.621957778930664,
"learning_rate": 9.622881355932205e-06,
"loss": 0.8009,
"step": 190
},
{
"epoch": 0.8154943934760448,
"grad_norm": 17.310779571533203,
"learning_rate": 9.580508474576273e-06,
"loss": 0.8456,
"step": 200
},
{
"epoch": 0.8562691131498471,
"grad_norm": 10.48460578918457,
"learning_rate": 9.53813559322034e-06,
"loss": 0.7,
"step": 210
},
{
"epoch": 0.8970438328236493,
"grad_norm": 10.495179176330566,
"learning_rate": 9.495762711864408e-06,
"loss": 0.6497,
"step": 220
},
{
"epoch": 0.9378185524974516,
"grad_norm": 19.09280776977539,
"learning_rate": 9.453389830508474e-06,
"loss": 0.8303,
"step": 230
},
{
"epoch": 0.9785932721712538,
"grad_norm": 18.26506233215332,
"learning_rate": 9.411016949152543e-06,
"loss": 0.736,
"step": 240
},
{
"epoch": 1.0,
"eval_general_loss": 0.7072561979293823,
"eval_general_runtime": 258.0411,
"eval_general_samples_per_second": 3.546,
"eval_general_steps_per_second": 0.887,
"step": 246
},
{
"epoch": 1.0,
"eval_code_loss": 0.8012194037437439,
"eval_code_runtime": 301.035,
"eval_code_samples_per_second": 3.049,
"eval_code_steps_per_second": 0.764,
"step": 246
},
{
"epoch": 1.0,
"eval_stem_loss": 0.7626175284385681,
"eval_stem_runtime": 254.1723,
"eval_stem_samples_per_second": 3.596,
"eval_stem_steps_per_second": 0.901,
"step": 246
},
{
"epoch": 1.016309887869521,
"grad_norm": 14.707352638244629,
"learning_rate": 9.368644067796611e-06,
"loss": 0.6495,
"step": 250
},
{
"epoch": 1.0570846075433231,
"grad_norm": 14.996277809143066,
"learning_rate": 9.32627118644068e-06,
"loss": 0.6753,
"step": 260
},
{
"epoch": 1.0978593272171253,
"grad_norm": 16.44031524658203,
"learning_rate": 9.283898305084746e-06,
"loss": 0.6465,
"step": 270
},
{
"epoch": 1.1386340468909277,
"grad_norm": 12.770790100097656,
"learning_rate": 9.241525423728814e-06,
"loss": 0.7006,
"step": 280
},
{
"epoch": 1.17940876656473,
"grad_norm": 9.644815444946289,
"learning_rate": 9.199152542372882e-06,
"loss": 0.7345,
"step": 290
},
{
"epoch": 1.2201834862385321,
"grad_norm": 12.410354614257812,
"learning_rate": 9.15677966101695e-06,
"loss": 0.5812,
"step": 300
},
{
"epoch": 1.2609582059123343,
"grad_norm": 14.592584609985352,
"learning_rate": 9.114406779661017e-06,
"loss": 0.7479,
"step": 310
},
{
"epoch": 1.3017329255861365,
"grad_norm": 8.983531951904297,
"learning_rate": 9.072033898305086e-06,
"loss": 0.6735,
"step": 320
},
{
"epoch": 1.3425076452599387,
"grad_norm": 10.594444274902344,
"learning_rate": 9.029661016949152e-06,
"loss": 0.6125,
"step": 330
},
{
"epoch": 1.3832823649337411,
"grad_norm": 8.827781677246094,
"learning_rate": 8.98728813559322e-06,
"loss": 0.5886,
"step": 340
},
{
"epoch": 1.4240570846075433,
"grad_norm": 8.48524284362793,
"learning_rate": 8.944915254237289e-06,
"loss": 0.6677,
"step": 350
},
{
"epoch": 1.4648318042813455,
"grad_norm": 14.892068862915039,
"learning_rate": 8.902542372881357e-06,
"loss": 0.6921,
"step": 360
},
{
"epoch": 1.505606523955148,
"grad_norm": 13.356966018676758,
"learning_rate": 8.860169491525424e-06,
"loss": 0.7095,
"step": 370
},
{
"epoch": 1.5463812436289501,
"grad_norm": 10.22076416015625,
"learning_rate": 8.817796610169492e-06,
"loss": 0.6031,
"step": 380
},
{
"epoch": 1.5871559633027523,
"grad_norm": 9.876967430114746,
"learning_rate": 8.77542372881356e-06,
"loss": 0.5818,
"step": 390
},
{
"epoch": 1.6279306829765545,
"grad_norm": 9.625609397888184,
"learning_rate": 8.733050847457629e-06,
"loss": 0.5913,
"step": 400
},
{
"epoch": 1.6687054026503567,
"grad_norm": 8.925414085388184,
"learning_rate": 8.690677966101695e-06,
"loss": 0.578,
"step": 410
},
{
"epoch": 1.709480122324159,
"grad_norm": 7.93916130065918,
"learning_rate": 8.648305084745763e-06,
"loss": 0.6955,
"step": 420
},
{
"epoch": 1.750254841997961,
"grad_norm": 7.942169666290283,
"learning_rate": 8.60593220338983e-06,
"loss": 0.5284,
"step": 430
},
{
"epoch": 1.7910295616717635,
"grad_norm": 9.36421012878418,
"learning_rate": 8.563559322033898e-06,
"loss": 0.7499,
"step": 440
},
{
"epoch": 1.8318042813455657,
"grad_norm": 12.293156623840332,
"learning_rate": 8.521186440677967e-06,
"loss": 0.6923,
"step": 450
},
{
"epoch": 1.8725790010193681,
"grad_norm": 9.337946891784668,
"learning_rate": 8.478813559322035e-06,
"loss": 0.6138,
"step": 460
},
{
"epoch": 1.9133537206931703,
"grad_norm": 10.548377990722656,
"learning_rate": 8.436440677966102e-06,
"loss": 0.6029,
"step": 470
},
{
"epoch": 1.9541284403669725,
"grad_norm": 17.341978073120117,
"learning_rate": 8.39406779661017e-06,
"loss": 0.624,
"step": 480
},
{
"epoch": 1.9949031600407747,
"grad_norm": 13.546788215637207,
"learning_rate": 8.351694915254238e-06,
"loss": 0.6197,
"step": 490
},
{
"epoch": 2.0,
"eval_general_loss": 0.6272298097610474,
"eval_general_runtime": 255.3018,
"eval_general_samples_per_second": 3.584,
"eval_general_steps_per_second": 0.897,
"step": 492
},
{
"epoch": 2.0,
"eval_code_loss": 0.69377201795578,
"eval_code_runtime": 298.208,
"eval_code_samples_per_second": 3.078,
"eval_code_steps_per_second": 0.771,
"step": 492
},
{
"epoch": 2.0,
"eval_stem_loss": 0.7002437710762024,
"eval_stem_runtime": 251.7134,
"eval_stem_samples_per_second": 3.631,
"eval_stem_steps_per_second": 0.91,
"step": 492
},
{
"epoch": 2.032619775739042,
"grad_norm": 8.038460731506348,
"learning_rate": 8.309322033898306e-06,
"loss": 0.4989,
"step": 500
},
{
"epoch": 2.073394495412844,
"grad_norm": 11.866241455078125,
"learning_rate": 8.266949152542375e-06,
"loss": 0.6914,
"step": 510
},
{
"epoch": 2.1141692150866462,
"grad_norm": 6.622210502624512,
"learning_rate": 8.224576271186441e-06,
"loss": 0.5043,
"step": 520
},
{
"epoch": 2.1549439347604484,
"grad_norm": 11.639506340026855,
"learning_rate": 8.182203389830508e-06,
"loss": 0.6054,
"step": 530
},
{
"epoch": 2.1957186544342506,
"grad_norm": 8.884720802307129,
"learning_rate": 8.139830508474576e-06,
"loss": 0.5524,
"step": 540
},
{
"epoch": 2.236493374108053,
"grad_norm": 12.228666305541992,
"learning_rate": 8.097457627118644e-06,
"loss": 0.6011,
"step": 550
},
{
"epoch": 2.2772680937818555,
"grad_norm": 10.770685195922852,
"learning_rate": 8.055084745762713e-06,
"loss": 0.5481,
"step": 560
},
{
"epoch": 2.3180428134556577,
"grad_norm": 12.777567863464355,
"learning_rate": 8.012711864406781e-06,
"loss": 0.496,
"step": 570
},
{
"epoch": 2.35881753312946,
"grad_norm": 12.713486671447754,
"learning_rate": 7.970338983050848e-06,
"loss": 0.4764,
"step": 580
},
{
"epoch": 2.399592252803262,
"grad_norm": 15.633949279785156,
"learning_rate": 7.927966101694916e-06,
"loss": 0.6179,
"step": 590
},
{
"epoch": 2.4403669724770642,
"grad_norm": 10.710966110229492,
"learning_rate": 7.885593220338984e-06,
"loss": 0.5726,
"step": 600
},
{
"epoch": 2.4811416921508664,
"grad_norm": 10.519601821899414,
"learning_rate": 7.843220338983052e-06,
"loss": 0.4988,
"step": 610
},
{
"epoch": 2.5219164118246686,
"grad_norm": 7.1472392082214355,
"learning_rate": 7.800847457627119e-06,
"loss": 0.5648,
"step": 620
},
{
"epoch": 2.562691131498471,
"grad_norm": 13.704323768615723,
"learning_rate": 7.758474576271187e-06,
"loss": 0.6488,
"step": 630
},
{
"epoch": 2.603465851172273,
"grad_norm": 8.187666893005371,
"learning_rate": 7.716101694915254e-06,
"loss": 0.5047,
"step": 640
},
{
"epoch": 2.6442405708460752,
"grad_norm": 8.175298690795898,
"learning_rate": 7.673728813559322e-06,
"loss": 0.5948,
"step": 650
},
{
"epoch": 2.6850152905198774,
"grad_norm": 11.140478134155273,
"learning_rate": 7.63135593220339e-06,
"loss": 0.5212,
"step": 660
},
{
"epoch": 2.72579001019368,
"grad_norm": 10.118830680847168,
"learning_rate": 7.588983050847458e-06,
"loss": 0.506,
"step": 670
},
{
"epoch": 2.7665647298674823,
"grad_norm": 12.593578338623047,
"learning_rate": 7.546610169491526e-06,
"loss": 0.5584,
"step": 680
},
{
"epoch": 2.8073394495412844,
"grad_norm": 8.544402122497559,
"learning_rate": 7.504237288135594e-06,
"loss": 0.5202,
"step": 690
},
{
"epoch": 2.8481141692150866,
"grad_norm": 11.403071403503418,
"learning_rate": 7.461864406779662e-06,
"loss": 0.6049,
"step": 700
},
{
"epoch": 2.888888888888889,
"grad_norm": 12.620019912719727,
"learning_rate": 7.41949152542373e-06,
"loss": 0.5484,
"step": 710
},
{
"epoch": 2.929663608562691,
"grad_norm": 8.751862525939941,
"learning_rate": 7.377118644067798e-06,
"loss": 0.5454,
"step": 720
},
{
"epoch": 2.9704383282364932,
"grad_norm": 10.521064758300781,
"learning_rate": 7.334745762711864e-06,
"loss": 0.5242,
"step": 730
},
{
"epoch": 3.0,
"eval_general_loss": 0.5933007597923279,
"eval_general_runtime": 255.4722,
"eval_general_samples_per_second": 3.582,
"eval_general_steps_per_second": 0.896,
"step": 738
},
{
"epoch": 3.0,
"eval_code_loss": 0.6812524795532227,
"eval_code_runtime": 299.2052,
"eval_code_samples_per_second": 3.068,
"eval_code_steps_per_second": 0.769,
"step": 738
},
{
"epoch": 3.0,
"eval_stem_loss": 0.6883611679077148,
"eval_stem_runtime": 251.705,
"eval_stem_samples_per_second": 3.631,
"eval_stem_steps_per_second": 0.91,
"step": 738
}
],
"logging_steps": 10,
"max_steps": 2460,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.6233005486340833e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}