{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9626168224299065,
"eval_steps": 500,
"global_step": 105,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.028037383177570093,
"grad_norm": 6.875266772140589,
"learning_rate": 9.090909090909091e-07,
"loss": 0.8809,
"step": 1
},
{
"epoch": 0.056074766355140186,
"grad_norm": 7.218806225736005,
"learning_rate": 1.8181818181818183e-06,
"loss": 0.9166,
"step": 2
},
{
"epoch": 0.08411214953271028,
"grad_norm": 7.165217482531288,
"learning_rate": 2.7272727272727272e-06,
"loss": 0.9283,
"step": 3
},
{
"epoch": 0.11214953271028037,
"grad_norm": 6.510593029573986,
"learning_rate": 3.6363636363636366e-06,
"loss": 0.883,
"step": 4
},
{
"epoch": 0.14018691588785046,
"grad_norm": 5.559523029774801,
"learning_rate": 4.5454545454545455e-06,
"loss": 0.8607,
"step": 5
},
{
"epoch": 0.16822429906542055,
"grad_norm": 2.778625777429955,
"learning_rate": 5.4545454545454545e-06,
"loss": 0.7817,
"step": 6
},
{
"epoch": 0.19626168224299065,
"grad_norm": 2.3920510999965168,
"learning_rate": 6.363636363636364e-06,
"loss": 0.7615,
"step": 7
},
{
"epoch": 0.22429906542056074,
"grad_norm": 4.85437969933909,
"learning_rate": 7.272727272727273e-06,
"loss": 0.7888,
"step": 8
},
{
"epoch": 0.2523364485981308,
"grad_norm": 4.930461277171273,
"learning_rate": 8.181818181818183e-06,
"loss": 0.7494,
"step": 9
},
{
"epoch": 0.2803738317757009,
"grad_norm": 4.678112801477435,
"learning_rate": 9.090909090909091e-06,
"loss": 0.7374,
"step": 10
},
{
"epoch": 0.308411214953271,
"grad_norm": 5.328794217016763,
"learning_rate": 1e-05,
"loss": 0.7183,
"step": 11
},
{
"epoch": 0.3364485981308411,
"grad_norm": 4.134378989150603,
"learning_rate": 9.997207818651273e-06,
"loss": 0.6848,
"step": 12
},
{
"epoch": 0.3644859813084112,
"grad_norm": 2.8544019317453837,
"learning_rate": 9.988834393115768e-06,
"loss": 0.6898,
"step": 13
},
{
"epoch": 0.3925233644859813,
"grad_norm": 2.3045684832011464,
"learning_rate": 9.97488907544252e-06,
"loss": 0.6622,
"step": 14
},
{
"epoch": 0.4205607476635514,
"grad_norm": 2.6024877815015226,
"learning_rate": 9.955387440773902e-06,
"loss": 0.6686,
"step": 15
},
{
"epoch": 0.4485981308411215,
"grad_norm": 2.2786915985988823,
"learning_rate": 9.930351269950144e-06,
"loss": 0.6354,
"step": 16
},
{
"epoch": 0.4766355140186916,
"grad_norm": 1.500565581436117,
"learning_rate": 9.899808525182935e-06,
"loss": 0.5975,
"step": 17
},
{
"epoch": 0.5046728971962616,
"grad_norm": 1.033773375804853,
"learning_rate": 9.863793318825186e-06,
"loss": 0.6067,
"step": 18
},
{
"epoch": 0.5327102803738317,
"grad_norm": 1.2427310405225207,
"learning_rate": 9.822345875271884e-06,
"loss": 0.6274,
"step": 19
},
{
"epoch": 0.5607476635514018,
"grad_norm": 1.318747659685276,
"learning_rate": 9.775512486034564e-06,
"loss": 0.5597,
"step": 20
},
{
"epoch": 0.5887850467289719,
"grad_norm": 0.870735684751146,
"learning_rate": 9.723345458039595e-06,
"loss": 0.5942,
"step": 21
},
{
"epoch": 0.616822429906542,
"grad_norm": 0.9586704338104365,
"learning_rate": 9.665903055208013e-06,
"loss": 0.6064,
"step": 22
},
{
"epoch": 0.6448598130841121,
"grad_norm": 0.9954396610666782,
"learning_rate": 9.603249433382145e-06,
"loss": 0.5772,
"step": 23
},
{
"epoch": 0.6728971962616822,
"grad_norm": 0.7812010108873707,
"learning_rate": 9.535454568671705e-06,
"loss": 0.5767,
"step": 24
},
{
"epoch": 0.7009345794392523,
"grad_norm": 0.7894256753145599,
"learning_rate": 9.462594179299408e-06,
"loss": 0.5637,
"step": 25
},
{
"epoch": 0.7289719626168224,
"grad_norm": 0.7390141422531185,
"learning_rate": 9.384749641033358e-06,
"loss": 0.5596,
"step": 26
},
{
"epoch": 0.7570093457943925,
"grad_norm": 0.665906702862532,
"learning_rate": 9.302007896300697e-06,
"loss": 0.5662,
"step": 27
},
{
"epoch": 0.7850467289719626,
"grad_norm": 0.6513412424138516,
"learning_rate": 9.214461357083986e-06,
"loss": 0.564,
"step": 28
},
{
"epoch": 0.8130841121495327,
"grad_norm": 0.6291636411395644,
"learning_rate": 9.122207801708802e-06,
"loss": 0.5384,
"step": 29
},
{
"epoch": 0.8411214953271028,
"grad_norm": 0.5902648332914012,
"learning_rate": 9.025350265637816e-06,
"loss": 0.5507,
"step": 30
},
{
"epoch": 0.8691588785046729,
"grad_norm": 0.6222451650411295,
"learning_rate": 8.923996926393306e-06,
"loss": 0.5441,
"step": 31
},
{
"epoch": 0.897196261682243,
"grad_norm": 0.6304032192628477,
"learning_rate": 8.818260982736662e-06,
"loss": 0.5416,
"step": 32
},
{
"epoch": 0.9252336448598131,
"grad_norm": 0.6583678403432456,
"learning_rate": 8.708260528239788e-06,
"loss": 0.5592,
"step": 33
},
{
"epoch": 0.9532710280373832,
"grad_norm": 0.5498290231534836,
"learning_rate": 8.594118419389648e-06,
"loss": 0.5424,
"step": 34
},
{
"epoch": 0.9813084112149533,
"grad_norm": 0.6007085748795916,
"learning_rate": 8.475962138373212e-06,
"loss": 0.5188,
"step": 35
},
{
"epoch": 1.0186915887850467,
"grad_norm": 1.1383281524493891,
"learning_rate": 8.353923650696119e-06,
"loss": 0.8184,
"step": 36
},
{
"epoch": 1.0467289719626167,
"grad_norm": 0.6158849949179445,
"learning_rate": 8.228139257794012e-06,
"loss": 0.5428,
"step": 37
},
{
"epoch": 1.074766355140187,
"grad_norm": 0.6272943078369314,
"learning_rate": 8.098749444801226e-06,
"loss": 0.5119,
"step": 38
},
{
"epoch": 1.102803738317757,
"grad_norm": 0.568415304099014,
"learning_rate": 7.965898723646777e-06,
"loss": 0.4796,
"step": 39
},
{
"epoch": 1.1308411214953271,
"grad_norm": 0.6027569200550916,
"learning_rate": 7.829735471652978e-06,
"loss": 0.4933,
"step": 40
},
{
"epoch": 1.158878504672897,
"grad_norm": 0.5930587267978846,
"learning_rate": 7.690411765816864e-06,
"loss": 0.5311,
"step": 41
},
{
"epoch": 1.1869158878504673,
"grad_norm": 0.6168439645027234,
"learning_rate": 7.548083212959588e-06,
"loss": 0.5141,
"step": 42
},
{
"epoch": 1.2149532710280373,
"grad_norm": 0.6079982582016128,
"learning_rate": 7.402908775933419e-06,
"loss": 0.5008,
"step": 43
},
{
"epoch": 1.2429906542056075,
"grad_norm": 0.5296668735528899,
"learning_rate": 7.25505059608051e-06,
"loss": 0.4565,
"step": 44
},
{
"epoch": 1.2710280373831775,
"grad_norm": 0.5712532712527553,
"learning_rate": 7.104673812141676e-06,
"loss": 0.5265,
"step": 45
},
{
"epoch": 1.2990654205607477,
"grad_norm": 0.5116524757509279,
"learning_rate": 6.9519463758174745e-06,
"loss": 0.4636,
"step": 46
},
{
"epoch": 1.3271028037383177,
"grad_norm": 0.5840481861796132,
"learning_rate": 6.797038864187564e-06,
"loss": 0.5074,
"step": 47
},
{
"epoch": 1.355140186915888,
"grad_norm": 0.6543600787488231,
"learning_rate": 6.640124289197845e-06,
"loss": 0.4534,
"step": 48
},
{
"epoch": 1.3831775700934579,
"grad_norm": 0.5883296850707392,
"learning_rate": 6.481377904428171e-06,
"loss": 0.507,
"step": 49
},
{
"epoch": 1.411214953271028,
"grad_norm": 0.5352442788246675,
"learning_rate": 6.3209770093564315e-06,
"loss": 0.5014,
"step": 50
},
{
"epoch": 1.439252336448598,
"grad_norm": 0.5663671022085269,
"learning_rate": 6.1591007513376425e-06,
"loss": 0.5154,
"step": 51
},
{
"epoch": 1.4672897196261683,
"grad_norm": 0.512804434398335,
"learning_rate": 5.995929925519181e-06,
"loss": 0.4693,
"step": 52
},
{
"epoch": 1.4953271028037383,
"grad_norm": 0.4895943761905167,
"learning_rate": 5.831646772915651e-06,
"loss": 0.4727,
"step": 53
},
{
"epoch": 1.5233644859813085,
"grad_norm": 0.4781001067743965,
"learning_rate": 5.666434776868895e-06,
"loss": 0.4795,
"step": 54
},
{
"epoch": 1.5514018691588785,
"grad_norm": 0.5100572007511248,
"learning_rate": 5.500478458120493e-06,
"loss": 0.485,
"step": 55
},
{
"epoch": 1.5794392523364484,
"grad_norm": 0.4593347768627825,
"learning_rate": 5.3339631687256085e-06,
"loss": 0.4604,
"step": 56
},
{
"epoch": 1.6074766355140186,
"grad_norm": 0.4355559696185358,
"learning_rate": 5.1670748850383734e-06,
"loss": 0.4949,
"step": 57
},
{
"epoch": 1.6355140186915889,
"grad_norm": 0.44530627331541606,
"learning_rate": 5e-06,
"loss": 0.4559,
"step": 58
},
{
"epoch": 1.6635514018691588,
"grad_norm": 0.5290768251124396,
"learning_rate": 4.832925114961629e-06,
"loss": 0.5491,
"step": 59
},
{
"epoch": 1.6915887850467288,
"grad_norm": 0.4698401515761138,
"learning_rate": 4.666036831274392e-06,
"loss": 0.4745,
"step": 60
},
{
"epoch": 1.719626168224299,
"grad_norm": 0.47189506368769507,
"learning_rate": 4.499521541879508e-06,
"loss": 0.4917,
"step": 61
},
{
"epoch": 1.7476635514018692,
"grad_norm": 0.41120610678659153,
"learning_rate": 4.333565223131107e-06,
"loss": 0.4254,
"step": 62
},
{
"epoch": 1.7757009345794392,
"grad_norm": 0.5202914823494961,
"learning_rate": 4.1683532270843505e-06,
"loss": 0.5126,
"step": 63
},
{
"epoch": 1.8037383177570092,
"grad_norm": 0.46002205816765607,
"learning_rate": 4.004070074480821e-06,
"loss": 0.4428,
"step": 64
},
{
"epoch": 1.8317757009345794,
"grad_norm": 0.48289040175388753,
"learning_rate": 3.840899248662358e-06,
"loss": 0.4728,
"step": 65
},
{
"epoch": 1.8598130841121496,
"grad_norm": 0.45066569109739746,
"learning_rate": 3.6790229906435706e-06,
"loss": 0.4847,
"step": 66
},
{
"epoch": 1.8878504672897196,
"grad_norm": 0.4982717771783117,
"learning_rate": 3.518622095571831e-06,
"loss": 0.4993,
"step": 67
},
{
"epoch": 1.9158878504672896,
"grad_norm": 0.45582845017231777,
"learning_rate": 3.3598757108021546e-06,
"loss": 0.4858,
"step": 68
},
{
"epoch": 1.9439252336448598,
"grad_norm": 0.41998644093652787,
"learning_rate": 3.202961135812437e-06,
"loss": 0.4497,
"step": 69
},
{
"epoch": 1.97196261682243,
"grad_norm": 0.4851280675327603,
"learning_rate": 3.0480536241825263e-06,
"loss": 0.484,
"step": 70
},
{
"epoch": 2.0093457943925235,
"grad_norm": 0.9237312028698884,
"learning_rate": 2.8953261878583263e-06,
"loss": 0.8215,
"step": 71
},
{
"epoch": 2.0373831775700935,
"grad_norm": 0.44979019020801697,
"learning_rate": 2.74494940391949e-06,
"loss": 0.454,
"step": 72
},
{
"epoch": 2.0654205607476634,
"grad_norm": 0.4809006872896553,
"learning_rate": 2.5970912240665815e-06,
"loss": 0.4252,
"step": 73
},
{
"epoch": 2.0934579439252334,
"grad_norm": 0.4731514706694169,
"learning_rate": 2.4519167870404126e-06,
"loss": 0.4565,
"step": 74
},
{
"epoch": 2.121495327102804,
"grad_norm": 0.45029828757143353,
"learning_rate": 2.309588234183137e-06,
"loss": 0.4449,
"step": 75
},
{
"epoch": 2.149532710280374,
"grad_norm": 0.42070476862356,
"learning_rate": 2.1702645283470238e-06,
"loss": 0.4156,
"step": 76
},
{
"epoch": 2.177570093457944,
"grad_norm": 0.44024408387050806,
"learning_rate": 2.0341012763532243e-06,
"loss": 0.451,
"step": 77
},
{
"epoch": 2.205607476635514,
"grad_norm": 0.4523773144325659,
"learning_rate": 1.9012505551987764e-06,
"loss": 0.4688,
"step": 78
},
{
"epoch": 2.2336448598130842,
"grad_norm": 0.43662698635405134,
"learning_rate": 1.771860742205988e-06,
"loss": 0.4582,
"step": 79
},
{
"epoch": 2.2616822429906542,
"grad_norm": 0.43338767275513607,
"learning_rate": 1.646076349303884e-06,
"loss": 0.4355,
"step": 80
},
{
"epoch": 2.289719626168224,
"grad_norm": 0.4338864500390928,
"learning_rate": 1.5240378616267887e-06,
"loss": 0.4345,
"step": 81
},
{
"epoch": 2.317757009345794,
"grad_norm": 0.39866030523916557,
"learning_rate": 1.4058815806103542e-06,
"loss": 0.4131,
"step": 82
},
{
"epoch": 2.3457943925233646,
"grad_norm": 0.42323573401128867,
"learning_rate": 1.2917394717602123e-06,
"loss": 0.4497,
"step": 83
},
{
"epoch": 2.3738317757009346,
"grad_norm": 0.42392359802291224,
"learning_rate": 1.1817390172633402e-06,
"loss": 0.4334,
"step": 84
},
{
"epoch": 2.4018691588785046,
"grad_norm": 0.4396866917062723,
"learning_rate": 1.0760030736066952e-06,
"loss": 0.4538,
"step": 85
},
{
"epoch": 2.4299065420560746,
"grad_norm": 0.44043651868336015,
"learning_rate": 9.746497343621857e-07,
"loss": 0.4733,
"step": 86
},
{
"epoch": 2.457943925233645,
"grad_norm": 0.4475720584061158,
"learning_rate": 8.777921982911996e-07,
"loss": 0.4268,
"step": 87
},
{
"epoch": 2.485981308411215,
"grad_norm": 0.4136538302801054,
"learning_rate": 7.85538642916015e-07,
"loss": 0.4249,
"step": 88
},
{
"epoch": 2.514018691588785,
"grad_norm": 0.41828673759448115,
"learning_rate": 6.979921036993042e-07,
"loss": 0.4666,
"step": 89
},
{
"epoch": 2.542056074766355,
"grad_norm": 0.4102107844240581,
"learning_rate": 6.152503589666426e-07,
"loss": 0.418,
"step": 90
},
{
"epoch": 2.5700934579439254,
"grad_norm": 0.395956232845265,
"learning_rate": 5.374058207005945e-07,
"loss": 0.4435,
"step": 91
},
{
"epoch": 2.5981308411214954,
"grad_norm": 0.3694009730705117,
"learning_rate": 4.6454543132829653e-07,
"loss": 0.4282,
"step": 92
},
{
"epoch": 2.6261682242990654,
"grad_norm": 0.4140634105484608,
"learning_rate": 3.9675056661785563e-07,
"loss": 0.459,
"step": 93
},
{
"epoch": 2.6542056074766354,
"grad_norm": 0.40034707005808723,
"learning_rate": 3.340969447919873e-07,
"loss": 0.4625,
"step": 94
},
{
"epoch": 2.6822429906542054,
"grad_norm": 0.37586829917131903,
"learning_rate": 2.7665454196040665e-07,
"loss": 0.4631,
"step": 95
},
{
"epoch": 2.710280373831776,
"grad_norm": 0.3947866262040236,
"learning_rate": 2.2448751396543788e-07,
"loss": 0.4534,
"step": 96
},
{
"epoch": 2.7383177570093458,
"grad_norm": 0.40204501035512735,
"learning_rate": 1.776541247281177e-07,
"loss": 0.4504,
"step": 97
},
{
"epoch": 2.7663551401869158,
"grad_norm": 0.3553350361388555,
"learning_rate": 1.3620668117481471e-07,
"loss": 0.4242,
"step": 98
},
{
"epoch": 2.794392523364486,
"grad_norm": 0.40811329455293177,
"learning_rate": 1.0019147481706626e-07,
"loss": 0.4748,
"step": 99
},
{
"epoch": 2.822429906542056,
"grad_norm": 0.36467769059578176,
"learning_rate": 6.964873004985717e-08,
"loss": 0.4288,
"step": 100
},
{
"epoch": 2.850467289719626,
"grad_norm": 0.3850026648540873,
"learning_rate": 4.461255922609986e-08,
"loss": 0.4565,
"step": 101
},
{
"epoch": 2.878504672897196,
"grad_norm": 0.32943826260555276,
"learning_rate": 2.511092455747932e-08,
"loss": 0.4166,
"step": 102
},
{
"epoch": 2.906542056074766,
"grad_norm": 0.3976890084601136,
"learning_rate": 1.1165606884234182e-08,
"loss": 0.4491,
"step": 103
},
{
"epoch": 2.9345794392523366,
"grad_norm": 0.3762750087340585,
"learning_rate": 2.792181348726941e-09,
"loss": 0.4313,
"step": 104
},
{
"epoch": 2.9626168224299065,
"grad_norm": 0.36050810853602744,
"learning_rate": 0.0,
"loss": 0.4156,
"step": 105
},
{
"epoch": 2.9626168224299065,
"step": 105,
"total_flos": 93374567677952.0,
"train_loss": 0.5375007223515283,
"train_runtime": 1698.1054,
"train_samples_per_second": 6.012,
"train_steps_per_second": 0.062
}
],
"logging_steps": 1,
"max_steps": 105,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 93374567677952.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}