{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.996011396011396,
"eval_steps": 500,
"global_step": 1314,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.022792022792022793,
"grad_norm": 1.0146420064637809,
"learning_rate": 1e-05,
"loss": 0.7412,
"step": 10
},
{
"epoch": 0.045584045584045586,
"grad_norm": 0.7912257831574413,
"learning_rate": 1e-05,
"loss": 0.6876,
"step": 20
},
{
"epoch": 0.06837606837606838,
"grad_norm": 0.8690010677441243,
"learning_rate": 1e-05,
"loss": 0.666,
"step": 30
},
{
"epoch": 0.09116809116809117,
"grad_norm": 0.505631934162416,
"learning_rate": 1e-05,
"loss": 0.6638,
"step": 40
},
{
"epoch": 0.11396011396011396,
"grad_norm": 0.33858449719431943,
"learning_rate": 1e-05,
"loss": 0.6608,
"step": 50
},
{
"epoch": 0.13675213675213677,
"grad_norm": 0.3902928439357306,
"learning_rate": 1e-05,
"loss": 0.6445,
"step": 60
},
{
"epoch": 0.15954415954415954,
"grad_norm": 0.32811851282307186,
"learning_rate": 1e-05,
"loss": 0.6448,
"step": 70
},
{
"epoch": 0.18233618233618235,
"grad_norm": 0.34238369327660734,
"learning_rate": 1e-05,
"loss": 0.6497,
"step": 80
},
{
"epoch": 0.20512820512820512,
"grad_norm": 0.31623988973775263,
"learning_rate": 1e-05,
"loss": 0.6353,
"step": 90
},
{
"epoch": 0.22792022792022792,
"grad_norm": 0.31093405606619007,
"learning_rate": 1e-05,
"loss": 0.6368,
"step": 100
},
{
"epoch": 0.25071225071225073,
"grad_norm": 0.36187170115633943,
"learning_rate": 1e-05,
"loss": 0.6284,
"step": 110
},
{
"epoch": 0.27350427350427353,
"grad_norm": 0.3233998336839521,
"learning_rate": 1e-05,
"loss": 0.6366,
"step": 120
},
{
"epoch": 0.2962962962962963,
"grad_norm": 0.3913907420956762,
"learning_rate": 1e-05,
"loss": 0.6377,
"step": 130
},
{
"epoch": 0.3190883190883191,
"grad_norm": 0.3360481444653111,
"learning_rate": 1e-05,
"loss": 0.6398,
"step": 140
},
{
"epoch": 0.3418803418803419,
"grad_norm": 0.3175168014145369,
"learning_rate": 1e-05,
"loss": 0.6323,
"step": 150
},
{
"epoch": 0.3646723646723647,
"grad_norm": 0.37657481652603003,
"learning_rate": 1e-05,
"loss": 0.6376,
"step": 160
},
{
"epoch": 0.38746438746438744,
"grad_norm": 0.35292598832747574,
"learning_rate": 1e-05,
"loss": 0.6294,
"step": 170
},
{
"epoch": 0.41025641025641024,
"grad_norm": 0.3020552424319043,
"learning_rate": 1e-05,
"loss": 0.635,
"step": 180
},
{
"epoch": 0.43304843304843305,
"grad_norm": 0.3439921293702071,
"learning_rate": 1e-05,
"loss": 0.6276,
"step": 190
},
{
"epoch": 0.45584045584045585,
"grad_norm": 0.3327414470872692,
"learning_rate": 1e-05,
"loss": 0.6356,
"step": 200
},
{
"epoch": 0.47863247863247865,
"grad_norm": 0.34684235410854686,
"learning_rate": 1e-05,
"loss": 0.6319,
"step": 210
},
{
"epoch": 0.5014245014245015,
"grad_norm": 0.3407250677678901,
"learning_rate": 1e-05,
"loss": 0.6357,
"step": 220
},
{
"epoch": 0.5242165242165242,
"grad_norm": 0.357024472193329,
"learning_rate": 1e-05,
"loss": 0.6314,
"step": 230
},
{
"epoch": 0.5470085470085471,
"grad_norm": 0.30797227810550154,
"learning_rate": 1e-05,
"loss": 0.6271,
"step": 240
},
{
"epoch": 0.5698005698005698,
"grad_norm": 0.33312179990469154,
"learning_rate": 1e-05,
"loss": 0.6321,
"step": 250
},
{
"epoch": 0.5925925925925926,
"grad_norm": 0.3367120191713227,
"learning_rate": 1e-05,
"loss": 0.6303,
"step": 260
},
{
"epoch": 0.6153846153846154,
"grad_norm": 0.33627122083313027,
"learning_rate": 1e-05,
"loss": 0.6274,
"step": 270
},
{
"epoch": 0.6381766381766382,
"grad_norm": 0.362508172904337,
"learning_rate": 1e-05,
"loss": 0.6382,
"step": 280
},
{
"epoch": 0.6609686609686609,
"grad_norm": 0.34361915555957834,
"learning_rate": 1e-05,
"loss": 0.6269,
"step": 290
},
{
"epoch": 0.6837606837606838,
"grad_norm": 0.3287032976695359,
"learning_rate": 1e-05,
"loss": 0.6249,
"step": 300
},
{
"epoch": 0.7065527065527065,
"grad_norm": 0.3191486869355932,
"learning_rate": 1e-05,
"loss": 0.6248,
"step": 310
},
{
"epoch": 0.7293447293447294,
"grad_norm": 0.31860851089180897,
"learning_rate": 1e-05,
"loss": 0.6254,
"step": 320
},
{
"epoch": 0.7521367521367521,
"grad_norm": 0.3846001822856447,
"learning_rate": 1e-05,
"loss": 0.622,
"step": 330
},
{
"epoch": 0.7749287749287749,
"grad_norm": 0.3477286187132196,
"learning_rate": 1e-05,
"loss": 0.6277,
"step": 340
},
{
"epoch": 0.7977207977207977,
"grad_norm": 0.366357630454119,
"learning_rate": 1e-05,
"loss": 0.6348,
"step": 350
},
{
"epoch": 0.8205128205128205,
"grad_norm": 0.3609953342647578,
"learning_rate": 1e-05,
"loss": 0.6267,
"step": 360
},
{
"epoch": 0.8433048433048433,
"grad_norm": 0.41707694363193043,
"learning_rate": 1e-05,
"loss": 0.6203,
"step": 370
},
{
"epoch": 0.8660968660968661,
"grad_norm": 0.3638258217776452,
"learning_rate": 1e-05,
"loss": 0.6326,
"step": 380
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.36073013889545996,
"learning_rate": 1e-05,
"loss": 0.6312,
"step": 390
},
{
"epoch": 0.9116809116809117,
"grad_norm": 0.3707654840473297,
"learning_rate": 1e-05,
"loss": 0.6343,
"step": 400
},
{
"epoch": 0.9344729344729344,
"grad_norm": 0.34309367527787005,
"learning_rate": 1e-05,
"loss": 0.6306,
"step": 410
},
{
"epoch": 0.9572649572649573,
"grad_norm": 0.35582985450029564,
"learning_rate": 1e-05,
"loss": 0.6176,
"step": 420
},
{
"epoch": 0.98005698005698,
"grad_norm": 0.4461192998565824,
"learning_rate": 1e-05,
"loss": 0.6304,
"step": 430
},
{
"epoch": 0.9982905982905983,
"eval_loss": 0.621112048625946,
"eval_runtime": 440.3517,
"eval_samples_per_second": 26.849,
"eval_steps_per_second": 0.42,
"step": 438
},
{
"epoch": 1.0034188034188034,
"grad_norm": 0.40599764408780503,
"learning_rate": 1e-05,
"loss": 0.6403,
"step": 440
},
{
"epoch": 1.0262108262108263,
"grad_norm": 0.33824605577756234,
"learning_rate": 1e-05,
"loss": 0.5777,
"step": 450
},
{
"epoch": 1.049002849002849,
"grad_norm": 0.32924589697953344,
"learning_rate": 1e-05,
"loss": 0.5713,
"step": 460
},
{
"epoch": 1.0717948717948718,
"grad_norm": 0.3390857296918303,
"learning_rate": 1e-05,
"loss": 0.5722,
"step": 470
},
{
"epoch": 1.0945868945868946,
"grad_norm": 0.35001642645281617,
"learning_rate": 1e-05,
"loss": 0.5799,
"step": 480
},
{
"epoch": 1.1173789173789175,
"grad_norm": 0.35554307229590787,
"learning_rate": 1e-05,
"loss": 0.5849,
"step": 490
},
{
"epoch": 1.1401709401709401,
"grad_norm": 0.31083370576221864,
"learning_rate": 1e-05,
"loss": 0.5755,
"step": 500
},
{
"epoch": 1.162962962962963,
"grad_norm": 0.31553364890263425,
"learning_rate": 1e-05,
"loss": 0.5736,
"step": 510
},
{
"epoch": 1.1857549857549858,
"grad_norm": 0.2987072829265303,
"learning_rate": 1e-05,
"loss": 0.5794,
"step": 520
},
{
"epoch": 1.2085470085470085,
"grad_norm": 0.3357868380230108,
"learning_rate": 1e-05,
"loss": 0.5731,
"step": 530
},
{
"epoch": 1.2313390313390313,
"grad_norm": 0.3424754214162845,
"learning_rate": 1e-05,
"loss": 0.5831,
"step": 540
},
{
"epoch": 1.2541310541310542,
"grad_norm": 0.2994505023233895,
"learning_rate": 1e-05,
"loss": 0.5798,
"step": 550
},
{
"epoch": 1.2769230769230768,
"grad_norm": 0.3261302844209219,
"learning_rate": 1e-05,
"loss": 0.5818,
"step": 560
},
{
"epoch": 1.2997150997150997,
"grad_norm": 0.32160593176293023,
"learning_rate": 1e-05,
"loss": 0.5762,
"step": 570
},
{
"epoch": 1.3225071225071225,
"grad_norm": 0.31343014468938596,
"learning_rate": 1e-05,
"loss": 0.5747,
"step": 580
},
{
"epoch": 1.3452991452991454,
"grad_norm": 0.3029232044781107,
"learning_rate": 1e-05,
"loss": 0.5744,
"step": 590
},
{
"epoch": 1.368091168091168,
"grad_norm": 0.3204205916615222,
"learning_rate": 1e-05,
"loss": 0.5801,
"step": 600
},
{
"epoch": 1.390883190883191,
"grad_norm": 0.33901583811674174,
"learning_rate": 1e-05,
"loss": 0.5802,
"step": 610
},
{
"epoch": 1.4136752136752135,
"grad_norm": 0.35647413040021203,
"learning_rate": 1e-05,
"loss": 0.5831,
"step": 620
},
{
"epoch": 1.4364672364672364,
"grad_norm": 0.3926041011121538,
"learning_rate": 1e-05,
"loss": 0.5813,
"step": 630
},
{
"epoch": 1.4592592592592593,
"grad_norm": 0.33806390127793345,
"learning_rate": 1e-05,
"loss": 0.5762,
"step": 640
},
{
"epoch": 1.4820512820512821,
"grad_norm": 0.328698581982291,
"learning_rate": 1e-05,
"loss": 0.5744,
"step": 650
},
{
"epoch": 1.504843304843305,
"grad_norm": 0.3395135859147101,
"learning_rate": 1e-05,
"loss": 0.5742,
"step": 660
},
{
"epoch": 1.5276353276353276,
"grad_norm": 0.3885455805540249,
"learning_rate": 1e-05,
"loss": 0.5816,
"step": 670
},
{
"epoch": 1.5504273504273505,
"grad_norm": 0.30996592095776465,
"learning_rate": 1e-05,
"loss": 0.5707,
"step": 680
},
{
"epoch": 1.573219373219373,
"grad_norm": 0.3146912481858014,
"learning_rate": 1e-05,
"loss": 0.5744,
"step": 690
},
{
"epoch": 1.596011396011396,
"grad_norm": 0.31588213782224217,
"learning_rate": 1e-05,
"loss": 0.59,
"step": 700
},
{
"epoch": 1.6188034188034188,
"grad_norm": 0.32153673893804763,
"learning_rate": 1e-05,
"loss": 0.5774,
"step": 710
},
{
"epoch": 1.6415954415954417,
"grad_norm": 0.31257504452618234,
"learning_rate": 1e-05,
"loss": 0.5818,
"step": 720
},
{
"epoch": 1.6643874643874645,
"grad_norm": 0.3253370468338142,
"learning_rate": 1e-05,
"loss": 0.5752,
"step": 730
},
{
"epoch": 1.6871794871794872,
"grad_norm": 0.35468087072518323,
"learning_rate": 1e-05,
"loss": 0.5765,
"step": 740
},
{
"epoch": 1.7099715099715098,
"grad_norm": 0.2941502107066292,
"learning_rate": 1e-05,
"loss": 0.574,
"step": 750
},
{
"epoch": 1.7327635327635327,
"grad_norm": 0.4040377978043082,
"learning_rate": 1e-05,
"loss": 0.5776,
"step": 760
},
{
"epoch": 1.7555555555555555,
"grad_norm": 0.3335681103789512,
"learning_rate": 1e-05,
"loss": 0.5743,
"step": 770
},
{
"epoch": 1.7783475783475784,
"grad_norm": 0.30226629822048895,
"learning_rate": 1e-05,
"loss": 0.5816,
"step": 780
},
{
"epoch": 1.8011396011396013,
"grad_norm": 0.3339556013176704,
"learning_rate": 1e-05,
"loss": 0.5793,
"step": 790
},
{
"epoch": 1.823931623931624,
"grad_norm": 0.3269246369063951,
"learning_rate": 1e-05,
"loss": 0.5666,
"step": 800
},
{
"epoch": 1.8467236467236468,
"grad_norm": 0.3224956260584643,
"learning_rate": 1e-05,
"loss": 0.5742,
"step": 810
},
{
"epoch": 1.8695156695156694,
"grad_norm": 0.3655123174434984,
"learning_rate": 1e-05,
"loss": 0.5721,
"step": 820
},
{
"epoch": 1.8923076923076922,
"grad_norm": 0.33531337558262225,
"learning_rate": 1e-05,
"loss": 0.5758,
"step": 830
},
{
"epoch": 1.915099715099715,
"grad_norm": 0.33279895151242517,
"learning_rate": 1e-05,
"loss": 0.5803,
"step": 840
},
{
"epoch": 1.937891737891738,
"grad_norm": 0.31841085542396463,
"learning_rate": 1e-05,
"loss": 0.5786,
"step": 850
},
{
"epoch": 1.9606837606837608,
"grad_norm": 0.3656774559080548,
"learning_rate": 1e-05,
"loss": 0.5798,
"step": 860
},
{
"epoch": 1.9834757834757835,
"grad_norm": 0.32129389800623026,
"learning_rate": 1e-05,
"loss": 0.5774,
"step": 870
},
{
"epoch": 1.9994301994301993,
"eval_loss": 0.6186357736587524,
"eval_runtime": 442.4653,
"eval_samples_per_second": 26.721,
"eval_steps_per_second": 0.418,
"step": 877
},
{
"epoch": 2.006837606837607,
"grad_norm": 0.40023961081016884,
"learning_rate": 1e-05,
"loss": 0.5896,
"step": 880
},
{
"epoch": 2.0296296296296297,
"grad_norm": 0.3934269207415266,
"learning_rate": 1e-05,
"loss": 0.5143,
"step": 890
},
{
"epoch": 2.0524216524216525,
"grad_norm": 0.3372672329666936,
"learning_rate": 1e-05,
"loss": 0.5277,
"step": 900
},
{
"epoch": 2.0752136752136754,
"grad_norm": 0.3590605375051872,
"learning_rate": 1e-05,
"loss": 0.5262,
"step": 910
},
{
"epoch": 2.098005698005698,
"grad_norm": 0.32027005855990875,
"learning_rate": 1e-05,
"loss": 0.529,
"step": 920
},
{
"epoch": 2.1207977207977207,
"grad_norm": 0.3122240355891651,
"learning_rate": 1e-05,
"loss": 0.5279,
"step": 930
},
{
"epoch": 2.1435897435897435,
"grad_norm": 0.306328979810517,
"learning_rate": 1e-05,
"loss": 0.523,
"step": 940
},
{
"epoch": 2.1663817663817664,
"grad_norm": 0.2997763176691866,
"learning_rate": 1e-05,
"loss": 0.5239,
"step": 950
},
{
"epoch": 2.1891737891737892,
"grad_norm": 0.30072443341331295,
"learning_rate": 1e-05,
"loss": 0.5291,
"step": 960
},
{
"epoch": 2.211965811965812,
"grad_norm": 0.3659610909649348,
"learning_rate": 1e-05,
"loss": 0.529,
"step": 970
},
{
"epoch": 2.234757834757835,
"grad_norm": 0.31170766785306336,
"learning_rate": 1e-05,
"loss": 0.5197,
"step": 980
},
{
"epoch": 2.2575498575498574,
"grad_norm": 0.3107127824005287,
"learning_rate": 1e-05,
"loss": 0.5287,
"step": 990
},
{
"epoch": 2.2803418803418802,
"grad_norm": 0.3077202897593116,
"learning_rate": 1e-05,
"loss": 0.5293,
"step": 1000
},
{
"epoch": 2.303133903133903,
"grad_norm": 0.3766294985445228,
"learning_rate": 1e-05,
"loss": 0.5269,
"step": 1010
},
{
"epoch": 2.325925925925926,
"grad_norm": 0.31670166754832074,
"learning_rate": 1e-05,
"loss": 0.5298,
"step": 1020
},
{
"epoch": 2.348717948717949,
"grad_norm": 0.3294563307726443,
"learning_rate": 1e-05,
"loss": 0.5311,
"step": 1030
},
{
"epoch": 2.3715099715099717,
"grad_norm": 0.33934557749302635,
"learning_rate": 1e-05,
"loss": 0.5299,
"step": 1040
},
{
"epoch": 2.394301994301994,
"grad_norm": 0.29973624613589284,
"learning_rate": 1e-05,
"loss": 0.5292,
"step": 1050
},
{
"epoch": 2.417094017094017,
"grad_norm": 0.30731911268155737,
"learning_rate": 1e-05,
"loss": 0.537,
"step": 1060
},
{
"epoch": 2.43988603988604,
"grad_norm": 0.3320184686838146,
"learning_rate": 1e-05,
"loss": 0.5397,
"step": 1070
},
{
"epoch": 2.4626780626780627,
"grad_norm": 0.33488791378484484,
"learning_rate": 1e-05,
"loss": 0.5354,
"step": 1080
},
{
"epoch": 2.4854700854700855,
"grad_norm": 0.29869988487306637,
"learning_rate": 1e-05,
"loss": 0.5339,
"step": 1090
},
{
"epoch": 2.5082621082621084,
"grad_norm": 0.3904880801438361,
"learning_rate": 1e-05,
"loss": 0.5394,
"step": 1100
},
{
"epoch": 2.5310541310541312,
"grad_norm": 0.38036081075155204,
"learning_rate": 1e-05,
"loss": 0.5341,
"step": 1110
},
{
"epoch": 2.5538461538461537,
"grad_norm": 0.30643406351346947,
"learning_rate": 1e-05,
"loss": 0.5322,
"step": 1120
},
{
"epoch": 2.5766381766381765,
"grad_norm": 0.2893340180647077,
"learning_rate": 1e-05,
"loss": 0.5319,
"step": 1130
},
{
"epoch": 2.5994301994301994,
"grad_norm": 0.29553745186992597,
"learning_rate": 1e-05,
"loss": 0.5318,
"step": 1140
},
{
"epoch": 2.6222222222222222,
"grad_norm": 0.3121235774200754,
"learning_rate": 1e-05,
"loss": 0.5324,
"step": 1150
},
{
"epoch": 2.645014245014245,
"grad_norm": 0.3537885981034807,
"learning_rate": 1e-05,
"loss": 0.5338,
"step": 1160
},
{
"epoch": 2.667806267806268,
"grad_norm": 0.3135782078081984,
"learning_rate": 1e-05,
"loss": 0.5361,
"step": 1170
},
{
"epoch": 2.690598290598291,
"grad_norm": 0.3120319160666968,
"learning_rate": 1e-05,
"loss": 0.5316,
"step": 1180
},
{
"epoch": 2.7133903133903132,
"grad_norm": 0.3721925648462127,
"learning_rate": 1e-05,
"loss": 0.5331,
"step": 1190
},
{
"epoch": 2.736182336182336,
"grad_norm": 0.35097259445904583,
"learning_rate": 1e-05,
"loss": 0.5362,
"step": 1200
},
{
"epoch": 2.758974358974359,
"grad_norm": 0.3191287188931511,
"learning_rate": 1e-05,
"loss": 0.5394,
"step": 1210
},
{
"epoch": 2.781766381766382,
"grad_norm": 0.3353550527181426,
"learning_rate": 1e-05,
"loss": 0.5301,
"step": 1220
},
{
"epoch": 2.8045584045584047,
"grad_norm": 0.3662461553840285,
"learning_rate": 1e-05,
"loss": 0.5286,
"step": 1230
},
{
"epoch": 2.827350427350427,
"grad_norm": 0.3582675691272763,
"learning_rate": 1e-05,
"loss": 0.5428,
"step": 1240
},
{
"epoch": 2.8501424501424504,
"grad_norm": 0.29421053202127245,
"learning_rate": 1e-05,
"loss": 0.5393,
"step": 1250
},
{
"epoch": 2.872934472934473,
"grad_norm": 0.3323816797271506,
"learning_rate": 1e-05,
"loss": 0.5369,
"step": 1260
},
{
"epoch": 2.8957264957264957,
"grad_norm": 0.34833626456933586,
"learning_rate": 1e-05,
"loss": 0.5381,
"step": 1270
},
{
"epoch": 2.9185185185185185,
"grad_norm": 0.3208962009280921,
"learning_rate": 1e-05,
"loss": 0.5288,
"step": 1280
},
{
"epoch": 2.9413105413105414,
"grad_norm": 0.3524250244854125,
"learning_rate": 1e-05,
"loss": 0.5293,
"step": 1290
},
{
"epoch": 2.9641025641025642,
"grad_norm": 0.31554960818197597,
"learning_rate": 1e-05,
"loss": 0.5303,
"step": 1300
},
{
"epoch": 2.9868945868945866,
"grad_norm": 0.33440739359228305,
"learning_rate": 1e-05,
"loss": 0.5394,
"step": 1310
},
{
"epoch": 2.996011396011396,
"eval_loss": 0.6266428232192993,
"eval_runtime": 446.7392,
"eval_samples_per_second": 26.465,
"eval_steps_per_second": 0.414,
"step": 1314
},
{
"epoch": 2.996011396011396,
"step": 1314,
"total_flos": 2755219238682624.0,
"train_loss": 0.5826670261641435,
"train_runtime": 70447.3802,
"train_samples_per_second": 9.565,
"train_steps_per_second": 0.019
}
],
"logging_steps": 10,
"max_steps": 1314,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2755219238682624.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}