{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.924760601915184,
"eval_steps": 500,
"global_step": 110,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04377564979480164,
"grad_norm": 10.977038416240942,
"learning_rate": 7.272727272727273e-06,
"loss": 1.6783,
"step": 1
},
{
"epoch": 0.08755129958960328,
"grad_norm": 10.685982173301461,
"learning_rate": 1.4545454545454546e-05,
"loss": 1.6756,
"step": 2
},
{
"epoch": 0.13132694938440492,
"grad_norm": 7.560341958816415,
"learning_rate": 2.1818181818181818e-05,
"loss": 1.5708,
"step": 3
},
{
"epoch": 0.17510259917920656,
"grad_norm": 6.257863682610026,
"learning_rate": 2.9090909090909093e-05,
"loss": 1.5415,
"step": 4
},
{
"epoch": 0.2188782489740082,
"grad_norm": 9.338746208897016,
"learning_rate": 3.6363636363636364e-05,
"loss": 1.5506,
"step": 5
},
{
"epoch": 0.26265389876880985,
"grad_norm": 6.7019369753380404,
"learning_rate": 4.3636363636363636e-05,
"loss": 1.4494,
"step": 6
},
{
"epoch": 0.3064295485636115,
"grad_norm": 2.7387345576832467,
"learning_rate": 5.0909090909090914e-05,
"loss": 1.3764,
"step": 7
},
{
"epoch": 0.35020519835841313,
"grad_norm": 2.9905508612885416,
"learning_rate": 5.8181818181818185e-05,
"loss": 1.3124,
"step": 8
},
{
"epoch": 0.39398084815321477,
"grad_norm": 5.142107061812394,
"learning_rate": 6.545454545454546e-05,
"loss": 1.3263,
"step": 9
},
{
"epoch": 0.4377564979480164,
"grad_norm": 6.498361005724975,
"learning_rate": 7.272727272727273e-05,
"loss": 1.302,
"step": 10
},
{
"epoch": 0.48153214774281805,
"grad_norm": 4.736561117836272,
"learning_rate": 8e-05,
"loss": 1.2851,
"step": 11
},
{
"epoch": 0.5253077975376197,
"grad_norm": 4.913666681984767,
"learning_rate": 7.997986169532741e-05,
"loss": 1.2886,
"step": 12
},
{
"epoch": 0.5690834473324213,
"grad_norm": 3.3337518195815763,
"learning_rate": 7.991946705887539e-05,
"loss": 1.2546,
"step": 13
},
{
"epoch": 0.612859097127223,
"grad_norm": 2.7069045612640275,
"learning_rate": 7.981887690292339e-05,
"loss": 1.2314,
"step": 14
},
{
"epoch": 0.6566347469220246,
"grad_norm": 2.8584488966099855,
"learning_rate": 7.967819251323182e-05,
"loss": 1.2315,
"step": 15
},
{
"epoch": 0.7004103967168263,
"grad_norm": 3.1377249425512765,
"learning_rate": 7.949755554705577e-05,
"loss": 1.2093,
"step": 16
},
{
"epoch": 0.7441860465116279,
"grad_norm": 1.3936091349070174,
"learning_rate": 7.927714789050826e-05,
"loss": 1.196,
"step": 17
},
{
"epoch": 0.7879616963064295,
"grad_norm": 3.028923392641176,
"learning_rate": 7.90171914754163e-05,
"loss": 1.2348,
"step": 18
},
{
"epoch": 0.8317373461012312,
"grad_norm": 2.196134083608347,
"learning_rate": 7.871794805585427e-05,
"loss": 1.2053,
"step": 19
},
{
"epoch": 0.8755129958960328,
"grad_norm": 1.6611206571415684,
"learning_rate": 7.837971894457991e-05,
"loss": 1.1834,
"step": 20
},
{
"epoch": 0.9192886456908345,
"grad_norm": 2.681882547825232,
"learning_rate": 7.800284470963783e-05,
"loss": 1.1831,
"step": 21
},
{
"epoch": 0.9630642954856361,
"grad_norm": 1.5845981079722795,
"learning_rate": 7.758770483143634e-05,
"loss": 1.1698,
"step": 22
},
{
"epoch": 1.0341997264021887,
"grad_norm": 4.534657915532011,
"learning_rate": 7.71347173206429e-05,
"loss": 2.0516,
"step": 23
},
{
"epoch": 1.0779753761969904,
"grad_norm": 2.8451498787070433,
"learning_rate": 7.664433829728279e-05,
"loss": 1.152,
"step": 24
},
{
"epoch": 1.121751025991792,
"grad_norm": 1.9899709698106878,
"learning_rate": 7.611706153146486e-05,
"loss": 1.1468,
"step": 25
},
{
"epoch": 1.1655266757865936,
"grad_norm": 2.8389265579783345,
"learning_rate": 7.555341794619695e-05,
"loss": 1.1408,
"step": 26
},
{
"epoch": 1.2093023255813953,
"grad_norm": 2.038952629316326,
"learning_rate": 7.49539750827914e-05,
"loss": 1.1219,
"step": 27
},
{
"epoch": 1.253077975376197,
"grad_norm": 2.0796847065206743,
"learning_rate": 7.431933652939909e-05,
"loss": 1.1136,
"step": 28
},
{
"epoch": 1.2968536251709986,
"grad_norm": 1.447940531771876,
"learning_rate": 7.365014131324725e-05,
"loss": 1.1191,
"step": 29
},
{
"epoch": 1.3406292749658002,
"grad_norm": 1.337447462838519,
"learning_rate": 7.294706325719331e-05,
"loss": 1.1145,
"step": 30
},
{
"epoch": 1.3844049247606018,
"grad_norm": 2.831107849480652,
"learning_rate": 7.221081030124235e-05,
"loss": 1.1269,
"step": 31
},
{
"epoch": 1.4281805745554035,
"grad_norm": 1.3913695315986807,
"learning_rate": 7.144212378971151e-05,
"loss": 1.1065,
"step": 32
},
{
"epoch": 1.4719562243502051,
"grad_norm": 2.9279595696419647,
"learning_rate": 7.064177772475912e-05,
"loss": 1.1347,
"step": 33
},
{
"epoch": 1.5157318741450068,
"grad_norm": 2.1607556666180012,
"learning_rate": 6.98105779870302e-05,
"loss": 1.1176,
"step": 34
},
{
"epoch": 1.5595075239398084,
"grad_norm": 2.035595521860542,
"learning_rate": 6.89493615242028e-05,
"loss": 1.0969,
"step": 35
},
{
"epoch": 1.60328317373461,
"grad_norm": 1.9632755753820994,
"learning_rate": 6.805899550825285e-05,
"loss": 1.1039,
"step": 36
},
{
"epoch": 1.6470588235294117,
"grad_norm": 1.756863438311583,
"learning_rate": 6.714037646228529e-05,
"loss": 1.1059,
"step": 37
},
{
"epoch": 1.6908344733242133,
"grad_norm": 1.6660401725071718,
"learning_rate": 6.619442935781141e-05,
"loss": 1.098,
"step": 38
},
{
"epoch": 1.734610123119015,
"grad_norm": 1.4739841905330868,
"learning_rate": 6.52221066833809e-05,
"loss": 1.0852,
"step": 39
},
{
"epoch": 1.7783857729138166,
"grad_norm": 1.3135170392231592,
"learning_rate": 6.422438748550667e-05,
"loss": 1.0901,
"step": 40
},
{
"epoch": 1.8221614227086183,
"grad_norm": 1.3850190455081264,
"learning_rate": 6.320227638284793e-05,
"loss": 1.0781,
"step": 41
},
{
"epoch": 1.86593707250342,
"grad_norm": 0.9405053138714191,
"learning_rate": 6.215680255464442e-05,
"loss": 1.0825,
"step": 42
},
{
"epoch": 1.9097127222982215,
"grad_norm": 1.1113532920678866,
"learning_rate": 6.10890187044201e-05,
"loss": 1.0766,
"step": 43
},
{
"epoch": 1.9534883720930232,
"grad_norm": 1.187498515237833,
"learning_rate": 6.000000000000001e-05,
"loss": 1.0763,
"step": 44
},
{
"epoch": 2.024623803009576,
"grad_norm": 2.160651041437791,
"learning_rate": 5.889084299090732e-05,
"loss": 1.9051,
"step": 45
},
{
"epoch": 2.0683994528043774,
"grad_norm": 0.9784307660310307,
"learning_rate": 5.776266450423097e-05,
"loss": 1.0442,
"step": 46
},
{
"epoch": 2.112175102599179,
"grad_norm": 1.031157632132424,
"learning_rate": 5.661660052007547e-05,
"loss": 1.0507,
"step": 47
},
{
"epoch": 2.1559507523939807,
"grad_norm": 1.4533738667722713,
"learning_rate": 5.5453805027725145e-05,
"loss": 1.0499,
"step": 48
},
{
"epoch": 2.1997264021887823,
"grad_norm": 0.875133136635367,
"learning_rate": 5.427544886367488e-05,
"loss": 1.0381,
"step": 49
},
{
"epoch": 2.243502051983584,
"grad_norm": 0.9817484749723364,
"learning_rate": 5.3082718532696874e-05,
"loss": 1.0437,
"step": 50
},
{
"epoch": 2.2872777017783856,
"grad_norm": 1.223185197293581,
"learning_rate": 5.1876815013131e-05,
"loss": 1.0329,
"step": 51
},
{
"epoch": 2.3310533515731873,
"grad_norm": 0.8071734287546062,
"learning_rate": 5.06589525476014e-05,
"loss": 1.0287,
"step": 52
},
{
"epoch": 2.374829001367989,
"grad_norm": 1.290044073399014,
"learning_rate": 4.94303574203771e-05,
"loss": 1.04,
"step": 53
},
{
"epoch": 2.4186046511627906,
"grad_norm": 0.6470063533070358,
"learning_rate": 4.819226672260763e-05,
"loss": 1.0331,
"step": 54
},
{
"epoch": 2.462380300957592,
"grad_norm": 0.9069681051512629,
"learning_rate": 4.694592710667723e-05,
"loss": 1.0243,
"step": 55
},
{
"epoch": 2.506155950752394,
"grad_norm": 0.8654968713633596,
"learning_rate": 4.5692593530931416e-05,
"loss": 1.0052,
"step": 56
},
{
"epoch": 2.5499316005471955,
"grad_norm": 1.0033676935022624,
"learning_rate": 4.4433527996040443e-05,
"loss": 1.0299,
"step": 57
},
{
"epoch": 2.593707250341997,
"grad_norm": 0.6256956675554375,
"learning_rate": 4.316999827427154e-05,
"loss": 1.0344,
"step": 58
},
{
"epoch": 2.6374829001367988,
"grad_norm": 0.7160307755017002,
"learning_rate": 4.19032766329497e-05,
"loss": 1.0197,
"step": 59
},
{
"epoch": 2.6812585499316004,
"grad_norm": 0.7016377338794068,
"learning_rate": 4.063463855339232e-05,
"loss": 1.0142,
"step": 60
},
{
"epoch": 2.725034199726402,
"grad_norm": 0.5412134943726296,
"learning_rate": 3.9365361446607684e-05,
"loss": 1.0194,
"step": 61
},
{
"epoch": 2.7688098495212037,
"grad_norm": 0.5581535816981518,
"learning_rate": 3.809672336705031e-05,
"loss": 1.021,
"step": 62
},
{
"epoch": 2.8125854993160053,
"grad_norm": 0.4895297941884866,
"learning_rate": 3.683000172572846e-05,
"loss": 1.0145,
"step": 63
},
{
"epoch": 2.856361149110807,
"grad_norm": 0.5430942752350344,
"learning_rate": 3.556647200395956e-05,
"loss": 1.0159,
"step": 64
},
{
"epoch": 2.9001367989056086,
"grad_norm": 0.43778465011409384,
"learning_rate": 3.4307406469068604e-05,
"loss": 1.0037,
"step": 65
},
{
"epoch": 2.9439124487004102,
"grad_norm": 0.5574066963629686,
"learning_rate": 3.305407289332279e-05,
"loss": 1.0124,
"step": 66
},
{
"epoch": 3.015047879616963,
"grad_norm": 0.8597898186828361,
"learning_rate": 3.180773327739238e-05,
"loss": 1.8113,
"step": 67
},
{
"epoch": 3.0588235294117645,
"grad_norm": 0.7562084727521547,
"learning_rate": 3.0569642579622905e-05,
"loss": 0.9808,
"step": 68
},
{
"epoch": 3.102599179206566,
"grad_norm": 0.5467749248417106,
"learning_rate": 2.9341047452398607e-05,
"loss": 0.9786,
"step": 69
},
{
"epoch": 3.146374829001368,
"grad_norm": 0.7317552492863292,
"learning_rate": 2.8123184986869022e-05,
"loss": 0.9807,
"step": 70
},
{
"epoch": 3.19015047879617,
"grad_norm": 0.4988928521059118,
"learning_rate": 2.691728146730314e-05,
"loss": 0.9801,
"step": 71
},
{
"epoch": 3.233926128590971,
"grad_norm": 0.6214726877620874,
"learning_rate": 2.5724551136325132e-05,
"loss": 0.9752,
"step": 72
},
{
"epoch": 3.277701778385773,
"grad_norm": 0.5723924398692547,
"learning_rate": 2.4546194972274852e-05,
"loss": 0.9833,
"step": 73
},
{
"epoch": 3.3214774281805743,
"grad_norm": 0.3802903109677986,
"learning_rate": 2.338339947992455e-05,
"loss": 0.9704,
"step": 74
},
{
"epoch": 3.3652530779753764,
"grad_norm": 0.5679922591981417,
"learning_rate": 2.2237335495769035e-05,
"loss": 0.9832,
"step": 75
},
{
"epoch": 3.4090287277701776,
"grad_norm": 0.32921569959729985,
"learning_rate": 2.11091570090927e-05,
"loss": 0.9832,
"step": 76
},
{
"epoch": 3.4528043775649797,
"grad_norm": 0.41963134099051513,
"learning_rate": 2.0000000000000012e-05,
"loss": 0.9925,
"step": 77
},
{
"epoch": 3.496580027359781,
"grad_norm": 0.4216326585626274,
"learning_rate": 1.8910981295579903e-05,
"loss": 0.9693,
"step": 78
},
{
"epoch": 3.540355677154583,
"grad_norm": 0.281097341488956,
"learning_rate": 1.7843197445355593e-05,
"loss": 0.9687,
"step": 79
},
{
"epoch": 3.584131326949384,
"grad_norm": 0.34537711442895225,
"learning_rate": 1.679772361715208e-05,
"loss": 0.9792,
"step": 80
},
{
"epoch": 3.6279069767441863,
"grad_norm": 0.38729234396659473,
"learning_rate": 1.5775612514493343e-05,
"loss": 0.9716,
"step": 81
},
{
"epoch": 3.6716826265389875,
"grad_norm": 0.23216602871427763,
"learning_rate": 1.4777893316619114e-05,
"loss": 0.9791,
"step": 82
},
{
"epoch": 3.7154582763337896,
"grad_norm": 0.34978409677834293,
"learning_rate": 1.3805570642188602e-05,
"loss": 0.9686,
"step": 83
},
{
"epoch": 3.7592339261285908,
"grad_norm": 0.2917010744705113,
"learning_rate": 1.2859623537714719e-05,
"loss": 0.9674,
"step": 84
},
{
"epoch": 3.803009575923393,
"grad_norm": 0.2659722448857111,
"learning_rate": 1.1941004491747145e-05,
"loss": 0.9639,
"step": 85
},
{
"epoch": 3.846785225718194,
"grad_norm": 0.2936639116335773,
"learning_rate": 1.1050638475797193e-05,
"loss": 0.9609,
"step": 86
},
{
"epoch": 3.890560875512996,
"grad_norm": 0.22955492375415615,
"learning_rate": 1.0189422012969814e-05,
"loss": 0.9722,
"step": 87
},
{
"epoch": 3.9343365253077973,
"grad_norm": 0.20270732194600466,
"learning_rate": 9.358222275240884e-06,
"loss": 0.968,
"step": 88
},
{
"epoch": 4.00547195622435,
"grad_norm": 0.4286144089204306,
"learning_rate": 8.55787621028851e-06,
"loss": 1.746,
"step": 89
},
{
"epoch": 4.049247606019152,
"grad_norm": 0.156416222673832,
"learning_rate": 7.789189698757656e-06,
"loss": 0.9564,
"step": 90
},
{
"epoch": 4.093023255813954,
"grad_norm": 0.23495972177315438,
"learning_rate": 7.052936742806693e-06,
"loss": 0.9593,
"step": 91
},
{
"epoch": 4.136798905608755,
"grad_norm": 0.2226318621452638,
"learning_rate": 6.349858686752748e-06,
"loss": 0.9627,
"step": 92
},
{
"epoch": 4.180574555403557,
"grad_norm": 0.16215755051665368,
"learning_rate": 5.680663470600918e-06,
"loss": 0.9486,
"step": 93
},
{
"epoch": 4.224350205198358,
"grad_norm": 0.17528331588918997,
"learning_rate": 5.046024917208603e-06,
"loss": 0.9564,
"step": 94
},
{
"epoch": 4.26812585499316,
"grad_norm": 0.19247905962597153,
"learning_rate": 4.446582053803066e-06,
"loss": 0.9532,
"step": 95
},
{
"epoch": 4.311901504787961,
"grad_norm": 0.1497248018118723,
"learning_rate": 3.882938468535158e-06,
"loss": 0.9441,
"step": 96
},
{
"epoch": 4.3556771545827635,
"grad_norm": 0.1368167463262844,
"learning_rate": 3.3556617027172168e-06,
"loss": 0.9502,
"step": 97
},
{
"epoch": 4.399452804377565,
"grad_norm": 0.14584282376533797,
"learning_rate": 2.8652826793570975e-06,
"loss": 0.9499,
"step": 98
},
{
"epoch": 4.443228454172367,
"grad_norm": 0.1507749360921821,
"learning_rate": 2.4122951685636674e-06,
"loss": 0.9623,
"step": 99
},
{
"epoch": 4.487004103967168,
"grad_norm": 0.12741172341135956,
"learning_rate": 1.997155290362187e-06,
"loss": 0.9546,
"step": 100
},
{
"epoch": 4.53077975376197,
"grad_norm": 0.11988388634885762,
"learning_rate": 1.6202810554201099e-06,
"loss": 0.9634,
"step": 101
},
{
"epoch": 4.574555403556771,
"grad_norm": 0.10876564961414978,
"learning_rate": 1.2820519441457502e-06,
"loss": 0.9489,
"step": 102
},
{
"epoch": 4.618331053351573,
"grad_norm": 0.10280656448879005,
"learning_rate": 9.828085245837183e-07,
"loss": 0.9481,
"step": 103
},
{
"epoch": 4.6621067031463745,
"grad_norm": 0.10558127777006338,
"learning_rate": 7.228521094917318e-07,
"loss": 0.9517,
"step": 104
},
{
"epoch": 4.705882352941177,
"grad_norm": 0.10826937864073408,
"learning_rate": 5.024444529442285e-07,
"loss": 0.9544,
"step": 105
},
{
"epoch": 4.749658002735978,
"grad_norm": 0.10594923812863334,
"learning_rate": 3.218074867681864e-07,
"loss": 0.9384,
"step": 106
},
{
"epoch": 4.79343365253078,
"grad_norm": 0.10786116991531677,
"learning_rate": 1.8112309707661647e-07,
"loss": 0.943,
"step": 107
},
{
"epoch": 4.837209302325581,
"grad_norm": 0.11373942157615996,
"learning_rate": 8.053294112462696e-08,
"loss": 0.9525,
"step": 108
},
{
"epoch": 4.880984952120383,
"grad_norm": 0.10941188396825419,
"learning_rate": 2.01383046725967e-08,
"loss": 0.9539,
"step": 109
},
{
"epoch": 4.924760601915184,
"grad_norm": 0.10542013778303103,
"learning_rate": 0.0,
"loss": 0.9416,
"step": 110
},
{
"epoch": 4.924760601915184,
"step": 110,
"total_flos": 2.9249198798007173e+18,
"train_loss": 1.1118553096597845,
"train_runtime": 25822.1288,
"train_samples_per_second": 2.263,
"train_steps_per_second": 0.004
}
],
"logging_steps": 1,
"max_steps": 110,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.9249198798007173e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}