{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.2249212775528565,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0002249212775528565,
"grad_norm": 11.230467796325684,
"learning_rate": 0.0,
"loss": 1.6122,
"num_input_tokens_seen": 262144,
"step": 1
},
{
"epoch": 0.000449842555105713,
"grad_norm": 5.675804615020752,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.4745,
"num_input_tokens_seen": 524288,
"step": 2
},
{
"epoch": 0.0006747638326585695,
"grad_norm": 5.168733596801758,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.2786,
"num_input_tokens_seen": 786432,
"step": 3
},
{
"epoch": 0.000899685110211426,
"grad_norm": 8.481163024902344,
"learning_rate": 3e-06,
"loss": 2.0106,
"num_input_tokens_seen": 1048576,
"step": 4
},
{
"epoch": 0.0011246063877642825,
"grad_norm": 7.578442573547363,
"learning_rate": 4.000000000000001e-06,
"loss": 1.9709,
"num_input_tokens_seen": 1310720,
"step": 5
},
{
"epoch": 0.001349527665317139,
"grad_norm": 5.638476848602295,
"learning_rate": 5e-06,
"loss": 1.4529,
"num_input_tokens_seen": 1572864,
"step": 6
},
{
"epoch": 0.0015744489428699954,
"grad_norm": 3.8255815505981445,
"learning_rate": 6e-06,
"loss": 0.9366,
"num_input_tokens_seen": 1835008,
"step": 7
},
{
"epoch": 0.001799370220422852,
"grad_norm": 3.63216495513916,
"learning_rate": 7.000000000000001e-06,
"loss": 1.2574,
"num_input_tokens_seen": 2097152,
"step": 8
},
{
"epoch": 0.0020242914979757085,
"grad_norm": 4.869683265686035,
"learning_rate": 8.000000000000001e-06,
"loss": 1.9996,
"num_input_tokens_seen": 2359296,
"step": 9
},
{
"epoch": 0.002249212775528565,
"grad_norm": 2.704763889312744,
"learning_rate": 9e-06,
"loss": 1.4956,
"num_input_tokens_seen": 2621440,
"step": 10
},
{
"epoch": 0.0024741340530814214,
"grad_norm": 3.840841054916382,
"learning_rate": 1e-05,
"loss": 2.4626,
"num_input_tokens_seen": 2883584,
"step": 11
},
{
"epoch": 0.002699055330634278,
"grad_norm": 2.593964099884033,
"learning_rate": 1.1000000000000001e-05,
"loss": 1.8367,
"num_input_tokens_seen": 3145728,
"step": 12
},
{
"epoch": 0.0029239766081871343,
"grad_norm": 2.4662444591522217,
"learning_rate": 1.2e-05,
"loss": 1.6849,
"num_input_tokens_seen": 3407872,
"step": 13
},
{
"epoch": 0.003148897885739991,
"grad_norm": 3.482602596282959,
"learning_rate": 1.3000000000000001e-05,
"loss": 1.948,
"num_input_tokens_seen": 3670016,
"step": 14
},
{
"epoch": 0.0033738191632928477,
"grad_norm": 8.98786735534668,
"learning_rate": 1.4000000000000001e-05,
"loss": 1.3526,
"num_input_tokens_seen": 3932160,
"step": 15
},
{
"epoch": 0.003598740440845704,
"grad_norm": 1.5205129384994507,
"learning_rate": 1.5e-05,
"loss": 1.509,
"num_input_tokens_seen": 4194304,
"step": 16
},
{
"epoch": 0.0038236617183985606,
"grad_norm": 1.9695186614990234,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.0624,
"num_input_tokens_seen": 4456448,
"step": 17
},
{
"epoch": 0.004048582995951417,
"grad_norm": 8.045424461364746,
"learning_rate": 1.7000000000000003e-05,
"loss": 1.733,
"num_input_tokens_seen": 4718592,
"step": 18
},
{
"epoch": 0.004273504273504274,
"grad_norm": 1.50222909450531,
"learning_rate": 1.8e-05,
"loss": 1.8379,
"num_input_tokens_seen": 4980736,
"step": 19
},
{
"epoch": 0.00449842555105713,
"grad_norm": 1.0556864738464355,
"learning_rate": 1.9e-05,
"loss": 1.1625,
"num_input_tokens_seen": 5242880,
"step": 20
},
{
"epoch": 0.004723346828609987,
"grad_norm": 1.1621472835540771,
"learning_rate": 2e-05,
"loss": 1.7579,
"num_input_tokens_seen": 5505024,
"step": 21
},
{
"epoch": 0.004948268106162843,
"grad_norm": 1.5123505592346191,
"learning_rate": 2.1e-05,
"loss": 1.2971,
"num_input_tokens_seen": 5767168,
"step": 22
},
{
"epoch": 0.0051731893837157,
"grad_norm": 2.0969297885894775,
"learning_rate": 2.2000000000000003e-05,
"loss": 1.6729,
"num_input_tokens_seen": 6029312,
"step": 23
},
{
"epoch": 0.005398110661268556,
"grad_norm": 1.5837117433547974,
"learning_rate": 2.3000000000000003e-05,
"loss": 2.0662,
"num_input_tokens_seen": 6291456,
"step": 24
},
{
"epoch": 0.005623031938821413,
"grad_norm": 0.7958189249038696,
"learning_rate": 2.4e-05,
"loss": 1.8239,
"num_input_tokens_seen": 6553600,
"step": 25
},
{
"epoch": 0.005847953216374269,
"grad_norm": 0.8515807390213013,
"learning_rate": 2.5e-05,
"loss": 1.3535,
"num_input_tokens_seen": 6815744,
"step": 26
},
{
"epoch": 0.006072874493927126,
"grad_norm": 0.6959180235862732,
"learning_rate": 2.6000000000000002e-05,
"loss": 1.298,
"num_input_tokens_seen": 7077888,
"step": 27
},
{
"epoch": 0.006297795771479982,
"grad_norm": 0.8492688536643982,
"learning_rate": 2.7000000000000002e-05,
"loss": 1.4665,
"num_input_tokens_seen": 7340032,
"step": 28
},
{
"epoch": 0.0065227170490328385,
"grad_norm": 0.7743356823921204,
"learning_rate": 2.8000000000000003e-05,
"loss": 1.9326,
"num_input_tokens_seen": 7602176,
"step": 29
},
{
"epoch": 0.006747638326585695,
"grad_norm": 0.6611963510513306,
"learning_rate": 2.9e-05,
"loss": 1.4156,
"num_input_tokens_seen": 7864320,
"step": 30
},
{
"epoch": 0.006972559604138551,
"grad_norm": 0.7195814251899719,
"learning_rate": 3e-05,
"loss": 1.3418,
"num_input_tokens_seen": 8126464,
"step": 31
},
{
"epoch": 0.007197480881691408,
"grad_norm": 0.985824465751648,
"learning_rate": 3.1e-05,
"loss": 1.6413,
"num_input_tokens_seen": 8388608,
"step": 32
},
{
"epoch": 0.007422402159244264,
"grad_norm": 0.8015071749687195,
"learning_rate": 3.2000000000000005e-05,
"loss": 2.1193,
"num_input_tokens_seen": 8650752,
"step": 33
},
{
"epoch": 0.007647323436797121,
"grad_norm": 0.6262965798377991,
"learning_rate": 3.3e-05,
"loss": 1.843,
"num_input_tokens_seen": 8912896,
"step": 34
},
{
"epoch": 0.007872244714349977,
"grad_norm": 0.8492324352264404,
"learning_rate": 3.4000000000000007e-05,
"loss": 1.3536,
"num_input_tokens_seen": 9175040,
"step": 35
},
{
"epoch": 0.008097165991902834,
"grad_norm": 0.9331862926483154,
"learning_rate": 3.5e-05,
"loss": 1.6696,
"num_input_tokens_seen": 9437184,
"step": 36
},
{
"epoch": 0.008322087269455691,
"grad_norm": 0.6202940344810486,
"learning_rate": 3.6e-05,
"loss": 1.4829,
"num_input_tokens_seen": 9699328,
"step": 37
},
{
"epoch": 0.008547008547008548,
"grad_norm": 0.7566497921943665,
"learning_rate": 3.7e-05,
"loss": 1.6269,
"num_input_tokens_seen": 9961472,
"step": 38
},
{
"epoch": 0.008771929824561403,
"grad_norm": 1.3238331079483032,
"learning_rate": 3.8e-05,
"loss": 1.2607,
"num_input_tokens_seen": 10223616,
"step": 39
},
{
"epoch": 0.00899685110211426,
"grad_norm": 0.6113777756690979,
"learning_rate": 3.9000000000000006e-05,
"loss": 2.1251,
"num_input_tokens_seen": 10485760,
"step": 40
},
{
"epoch": 0.009221772379667117,
"grad_norm": 0.4617583155632019,
"learning_rate": 4e-05,
"loss": 1.6756,
"num_input_tokens_seen": 10747904,
"step": 41
},
{
"epoch": 0.009446693657219974,
"grad_norm": 0.6212331652641296,
"learning_rate": 4.1e-05,
"loss": 1.5033,
"num_input_tokens_seen": 11010048,
"step": 42
},
{
"epoch": 0.009671614934772829,
"grad_norm": 0.40810370445251465,
"learning_rate": 4.2e-05,
"loss": 1.3236,
"num_input_tokens_seen": 11272192,
"step": 43
},
{
"epoch": 0.009896536212325686,
"grad_norm": 0.5068628191947937,
"learning_rate": 4.3e-05,
"loss": 1.4815,
"num_input_tokens_seen": 11534336,
"step": 44
},
{
"epoch": 0.010121457489878543,
"grad_norm": 0.40186524391174316,
"learning_rate": 4.4000000000000006e-05,
"loss": 1.7775,
"num_input_tokens_seen": 11796480,
"step": 45
},
{
"epoch": 0.0103463787674314,
"grad_norm": 0.3428671956062317,
"learning_rate": 4.5e-05,
"loss": 1.2417,
"num_input_tokens_seen": 12058624,
"step": 46
},
{
"epoch": 0.010571300044984255,
"grad_norm": 0.519368588924408,
"learning_rate": 4.600000000000001e-05,
"loss": 1.7107,
"num_input_tokens_seen": 12320768,
"step": 47
},
{
"epoch": 0.010796221322537112,
"grad_norm": 0.41630029678344727,
"learning_rate": 4.7e-05,
"loss": 0.9635,
"num_input_tokens_seen": 12582912,
"step": 48
},
{
"epoch": 0.011021142600089968,
"grad_norm": 0.3969324827194214,
"learning_rate": 4.8e-05,
"loss": 1.5873,
"num_input_tokens_seen": 12845056,
"step": 49
},
{
"epoch": 0.011246063877642825,
"grad_norm": 0.358235239982605,
"learning_rate": 4.9e-05,
"loss": 1.8371,
"num_input_tokens_seen": 13107200,
"step": 50
},
{
"epoch": 0.011470985155195682,
"grad_norm": 0.40880703926086426,
"learning_rate": 5e-05,
"loss": 1.3239,
"num_input_tokens_seen": 13369344,
"step": 51
},
{
"epoch": 0.011695906432748537,
"grad_norm": 0.4353587329387665,
"learning_rate": 5.1000000000000006e-05,
"loss": 1.8723,
"num_input_tokens_seen": 13631488,
"step": 52
},
{
"epoch": 0.011920827710301394,
"grad_norm": 0.4280511736869812,
"learning_rate": 5.2000000000000004e-05,
"loss": 1.3009,
"num_input_tokens_seen": 13893632,
"step": 53
},
{
"epoch": 0.012145748987854251,
"grad_norm": 0.318153440952301,
"learning_rate": 5.300000000000001e-05,
"loss": 1.4786,
"num_input_tokens_seen": 14155776,
"step": 54
},
{
"epoch": 0.012370670265407108,
"grad_norm": 0.4111901819705963,
"learning_rate": 5.4000000000000005e-05,
"loss": 1.8014,
"num_input_tokens_seen": 14417920,
"step": 55
},
{
"epoch": 0.012595591542959963,
"grad_norm": 1.1243131160736084,
"learning_rate": 5.500000000000001e-05,
"loss": 1.3581,
"num_input_tokens_seen": 14680064,
"step": 56
},
{
"epoch": 0.01282051282051282,
"grad_norm": 0.37373456358909607,
"learning_rate": 5.6000000000000006e-05,
"loss": 1.6183,
"num_input_tokens_seen": 14942208,
"step": 57
},
{
"epoch": 0.013045434098065677,
"grad_norm": 0.334053635597229,
"learning_rate": 5.6999999999999996e-05,
"loss": 1.5023,
"num_input_tokens_seen": 15204352,
"step": 58
},
{
"epoch": 0.013270355375618534,
"grad_norm": 0.4299324154853821,
"learning_rate": 5.8e-05,
"loss": 1.7593,
"num_input_tokens_seen": 15466496,
"step": 59
},
{
"epoch": 0.01349527665317139,
"grad_norm": 0.3081894516944885,
"learning_rate": 5.9e-05,
"loss": 1.3975,
"num_input_tokens_seen": 15728640,
"step": 60
},
{
"epoch": 0.013720197930724246,
"grad_norm": 0.38843464851379395,
"learning_rate": 6e-05,
"loss": 1.571,
"num_input_tokens_seen": 15990784,
"step": 61
},
{
"epoch": 0.013945119208277103,
"grad_norm": 0.6251524090766907,
"learning_rate": 6.1e-05,
"loss": 1.6475,
"num_input_tokens_seen": 16252928,
"step": 62
},
{
"epoch": 0.01417004048582996,
"grad_norm": 0.3833595812320709,
"learning_rate": 6.2e-05,
"loss": 1.7664,
"num_input_tokens_seen": 16515072,
"step": 63
},
{
"epoch": 0.014394961763382817,
"grad_norm": 0.4029980003833771,
"learning_rate": 6.3e-05,
"loss": 1.8962,
"num_input_tokens_seen": 16777216,
"step": 64
},
{
"epoch": 0.014619883040935672,
"grad_norm": 0.5438060164451599,
"learning_rate": 6.400000000000001e-05,
"loss": 1.8436,
"num_input_tokens_seen": 17039360,
"step": 65
},
{
"epoch": 0.014844804318488529,
"grad_norm": 0.41551777720451355,
"learning_rate": 6.500000000000001e-05,
"loss": 1.622,
"num_input_tokens_seen": 17301504,
"step": 66
},
{
"epoch": 0.015069725596041385,
"grad_norm": 0.6011863350868225,
"learning_rate": 6.6e-05,
"loss": 1.3474,
"num_input_tokens_seen": 17563648,
"step": 67
},
{
"epoch": 0.015294646873594242,
"grad_norm": 0.31412574648857117,
"learning_rate": 6.7e-05,
"loss": 1.6394,
"num_input_tokens_seen": 17825792,
"step": 68
},
{
"epoch": 0.0155195681511471,
"grad_norm": 0.3678301274776459,
"learning_rate": 6.800000000000001e-05,
"loss": 1.1412,
"num_input_tokens_seen": 18087936,
"step": 69
},
{
"epoch": 0.015744489428699954,
"grad_norm": 0.4133774936199188,
"learning_rate": 6.9e-05,
"loss": 1.8902,
"num_input_tokens_seen": 18350080,
"step": 70
},
{
"epoch": 0.015969410706252813,
"grad_norm": 0.5994505882263184,
"learning_rate": 7e-05,
"loss": 1.4304,
"num_input_tokens_seen": 18612224,
"step": 71
},
{
"epoch": 0.016194331983805668,
"grad_norm": 0.40050116181373596,
"learning_rate": 7.1e-05,
"loss": 1.2177,
"num_input_tokens_seen": 18874368,
"step": 72
},
{
"epoch": 0.016419253261358523,
"grad_norm": 0.4649305045604706,
"learning_rate": 7.2e-05,
"loss": 1.6078,
"num_input_tokens_seen": 19136512,
"step": 73
},
{
"epoch": 0.016644174538911382,
"grad_norm": 0.42778080701828003,
"learning_rate": 7.3e-05,
"loss": 1.5667,
"num_input_tokens_seen": 19398656,
"step": 74
},
{
"epoch": 0.016869095816464237,
"grad_norm": 0.44132548570632935,
"learning_rate": 7.4e-05,
"loss": 1.7146,
"num_input_tokens_seen": 19660800,
"step": 75
},
{
"epoch": 0.017094017094017096,
"grad_norm": 0.5165697932243347,
"learning_rate": 7.500000000000001e-05,
"loss": 1.8052,
"num_input_tokens_seen": 19922944,
"step": 76
},
{
"epoch": 0.01731893837156995,
"grad_norm": 0.32435211539268494,
"learning_rate": 7.6e-05,
"loss": 1.593,
"num_input_tokens_seen": 20185088,
"step": 77
},
{
"epoch": 0.017543859649122806,
"grad_norm": 0.39822056889533997,
"learning_rate": 7.7e-05,
"loss": 1.9039,
"num_input_tokens_seen": 20447232,
"step": 78
},
{
"epoch": 0.017768780926675665,
"grad_norm": 0.3774968981742859,
"learning_rate": 7.800000000000001e-05,
"loss": 1.9257,
"num_input_tokens_seen": 20709376,
"step": 79
},
{
"epoch": 0.01799370220422852,
"grad_norm": 0.29441991448402405,
"learning_rate": 7.900000000000001e-05,
"loss": 1.4104,
"num_input_tokens_seen": 20971520,
"step": 80
},
{
"epoch": 0.018218623481781375,
"grad_norm": 0.31885433197021484,
"learning_rate": 8e-05,
"loss": 1.1657,
"num_input_tokens_seen": 21233664,
"step": 81
},
{
"epoch": 0.018443544759334234,
"grad_norm": 2.536576986312866,
"learning_rate": 8.1e-05,
"loss": 1.6162,
"num_input_tokens_seen": 21495808,
"step": 82
},
{
"epoch": 0.01866846603688709,
"grad_norm": 0.42354825139045715,
"learning_rate": 8.2e-05,
"loss": 1.9433,
"num_input_tokens_seen": 21757952,
"step": 83
},
{
"epoch": 0.018893387314439947,
"grad_norm": 0.33853453397750854,
"learning_rate": 8.3e-05,
"loss": 1.4269,
"num_input_tokens_seen": 22020096,
"step": 84
},
{
"epoch": 0.019118308591992803,
"grad_norm": 0.3609357476234436,
"learning_rate": 8.4e-05,
"loss": 1.6874,
"num_input_tokens_seen": 22282240,
"step": 85
},
{
"epoch": 0.019343229869545658,
"grad_norm": 0.4715583324432373,
"learning_rate": 8.5e-05,
"loss": 1.7668,
"num_input_tokens_seen": 22544384,
"step": 86
},
{
"epoch": 0.019568151147098516,
"grad_norm": 0.4518505930900574,
"learning_rate": 8.6e-05,
"loss": 1.5703,
"num_input_tokens_seen": 22806528,
"step": 87
},
{
"epoch": 0.01979307242465137,
"grad_norm": 0.30384761095046997,
"learning_rate": 8.7e-05,
"loss": 1.0741,
"num_input_tokens_seen": 23068672,
"step": 88
},
{
"epoch": 0.02001799370220423,
"grad_norm": 0.2828017473220825,
"learning_rate": 8.800000000000001e-05,
"loss": 1.181,
"num_input_tokens_seen": 23330816,
"step": 89
},
{
"epoch": 0.020242914979757085,
"grad_norm": 0.4779614806175232,
"learning_rate": 8.900000000000001e-05,
"loss": 1.3726,
"num_input_tokens_seen": 23592960,
"step": 90
},
{
"epoch": 0.02046783625730994,
"grad_norm": 0.37162265181541443,
"learning_rate": 9e-05,
"loss": 1.1992,
"num_input_tokens_seen": 23855104,
"step": 91
},
{
"epoch": 0.0206927575348628,
"grad_norm": 0.3957003653049469,
"learning_rate": 9.1e-05,
"loss": 1.9987,
"num_input_tokens_seen": 24117248,
"step": 92
},
{
"epoch": 0.020917678812415654,
"grad_norm": 0.3890228271484375,
"learning_rate": 9.200000000000001e-05,
"loss": 1.7081,
"num_input_tokens_seen": 24379392,
"step": 93
},
{
"epoch": 0.02114260008996851,
"grad_norm": 0.4864268898963928,
"learning_rate": 9.300000000000001e-05,
"loss": 1.7492,
"num_input_tokens_seen": 24641536,
"step": 94
},
{
"epoch": 0.021367521367521368,
"grad_norm": 0.8580865263938904,
"learning_rate": 9.4e-05,
"loss": 1.896,
"num_input_tokens_seen": 24903680,
"step": 95
},
{
"epoch": 0.021592442645074223,
"grad_norm": 0.3866703510284424,
"learning_rate": 9.5e-05,
"loss": 1.3134,
"num_input_tokens_seen": 25165824,
"step": 96
},
{
"epoch": 0.02181736392262708,
"grad_norm": 0.46616268157958984,
"learning_rate": 9.6e-05,
"loss": 1.3536,
"num_input_tokens_seen": 25427968,
"step": 97
},
{
"epoch": 0.022042285200179937,
"grad_norm": 0.33651959896087646,
"learning_rate": 9.7e-05,
"loss": 1.5113,
"num_input_tokens_seen": 25690112,
"step": 98
},
{
"epoch": 0.022267206477732792,
"grad_norm": 0.6243886351585388,
"learning_rate": 9.8e-05,
"loss": 1.9306,
"num_input_tokens_seen": 25952256,
"step": 99
},
{
"epoch": 0.02249212775528565,
"grad_norm": 0.3050474524497986,
"learning_rate": 9.900000000000001e-05,
"loss": 1.4151,
"num_input_tokens_seen": 26214400,
"step": 100
},
{
"epoch": 0.022717049032838506,
"grad_norm": 0.32986557483673096,
"learning_rate": 0.0001,
"loss": 1.6279,
"num_input_tokens_seen": 26476544,
"step": 101
},
{
"epoch": 0.022941970310391364,
"grad_norm": 0.2438795268535614,
"learning_rate": 9.999969538288952e-05,
"loss": 1.107,
"num_input_tokens_seen": 26738688,
"step": 102
},
{
"epoch": 0.02316689158794422,
"grad_norm": 0.36484190821647644,
"learning_rate": 9.999878153526974e-05,
"loss": 1.9403,
"num_input_tokens_seen": 27000832,
"step": 103
},
{
"epoch": 0.023391812865497075,
"grad_norm": 0.4290502071380615,
"learning_rate": 9.999725846827562e-05,
"loss": 1.6049,
"num_input_tokens_seen": 27262976,
"step": 104
},
{
"epoch": 0.023616734143049933,
"grad_norm": 0.3903041183948517,
"learning_rate": 9.999512620046522e-05,
"loss": 1.5587,
"num_input_tokens_seen": 27525120,
"step": 105
},
{
"epoch": 0.02384165542060279,
"grad_norm": 0.3593505918979645,
"learning_rate": 9.999238475781957e-05,
"loss": 1.4838,
"num_input_tokens_seen": 27787264,
"step": 106
},
{
"epoch": 0.024066576698155647,
"grad_norm": 0.3363569378852844,
"learning_rate": 9.998903417374228e-05,
"loss": 1.8302,
"num_input_tokens_seen": 28049408,
"step": 107
},
{
"epoch": 0.024291497975708502,
"grad_norm": 0.8137016296386719,
"learning_rate": 9.998507448905917e-05,
"loss": 1.6889,
"num_input_tokens_seen": 28311552,
"step": 108
},
{
"epoch": 0.024516419253261357,
"grad_norm": 0.3933553695678711,
"learning_rate": 9.998050575201771e-05,
"loss": 1.8229,
"num_input_tokens_seen": 28573696,
"step": 109
},
{
"epoch": 0.024741340530814216,
"grad_norm": 0.414473295211792,
"learning_rate": 9.997532801828658e-05,
"loss": 1.5831,
"num_input_tokens_seen": 28835840,
"step": 110
},
{
"epoch": 0.02496626180836707,
"grad_norm": 0.2983182370662689,
"learning_rate": 9.99695413509548e-05,
"loss": 1.0271,
"num_input_tokens_seen": 29097984,
"step": 111
},
{
"epoch": 0.025191183085919926,
"grad_norm": 0.3780648708343506,
"learning_rate": 9.996314582053106e-05,
"loss": 1.6247,
"num_input_tokens_seen": 29360128,
"step": 112
},
{
"epoch": 0.025416104363472785,
"grad_norm": 0.3796830177307129,
"learning_rate": 9.995614150494293e-05,
"loss": 1.8959,
"num_input_tokens_seen": 29622272,
"step": 113
},
{
"epoch": 0.02564102564102564,
"grad_norm": 0.43109461665153503,
"learning_rate": 9.994852848953574e-05,
"loss": 1.6373,
"num_input_tokens_seen": 29884416,
"step": 114
},
{
"epoch": 0.0258659469185785,
"grad_norm": 0.4484831392765045,
"learning_rate": 9.99403068670717e-05,
"loss": 1.7273,
"num_input_tokens_seen": 30146560,
"step": 115
},
{
"epoch": 0.026090868196131354,
"grad_norm": 0.37315282225608826,
"learning_rate": 9.99314767377287e-05,
"loss": 1.1327,
"num_input_tokens_seen": 30408704,
"step": 116
},
{
"epoch": 0.02631578947368421,
"grad_norm": 0.4303310513496399,
"learning_rate": 9.992203820909906e-05,
"loss": 1.628,
"num_input_tokens_seen": 30670848,
"step": 117
},
{
"epoch": 0.026540710751237068,
"grad_norm": 0.4225418269634247,
"learning_rate": 9.991199139618827e-05,
"loss": 1.5496,
"num_input_tokens_seen": 30932992,
"step": 118
},
{
"epoch": 0.026765632028789923,
"grad_norm": 0.7125269770622253,
"learning_rate": 9.990133642141359e-05,
"loss": 1.8195,
"num_input_tokens_seen": 31195136,
"step": 119
},
{
"epoch": 0.02699055330634278,
"grad_norm": 0.4084630012512207,
"learning_rate": 9.98900734146025e-05,
"loss": 1.8634,
"num_input_tokens_seen": 31457280,
"step": 120
},
{
"epoch": 0.027215474583895637,
"grad_norm": 0.441657692193985,
"learning_rate": 9.987820251299122e-05,
"loss": 1.5867,
"num_input_tokens_seen": 31719424,
"step": 121
},
{
"epoch": 0.027440395861448492,
"grad_norm": 0.5232109427452087,
"learning_rate": 9.986572386122291e-05,
"loss": 1.9827,
"num_input_tokens_seen": 31981568,
"step": 122
},
{
"epoch": 0.02766531713900135,
"grad_norm": 0.45924124121665955,
"learning_rate": 9.985263761134602e-05,
"loss": 1.3527,
"num_input_tokens_seen": 32243712,
"step": 123
},
{
"epoch": 0.027890238416554206,
"grad_norm": 0.3332464396953583,
"learning_rate": 9.983894392281237e-05,
"loss": 1.3638,
"num_input_tokens_seen": 32505856,
"step": 124
},
{
"epoch": 0.028115159694107064,
"grad_norm": 0.5264614224433899,
"learning_rate": 9.982464296247522e-05,
"loss": 1.2889,
"num_input_tokens_seen": 32768000,
"step": 125
},
{
"epoch": 0.02834008097165992,
"grad_norm": 0.4572417736053467,
"learning_rate": 9.980973490458728e-05,
"loss": 1.3363,
"num_input_tokens_seen": 33030144,
"step": 126
},
{
"epoch": 0.028565002249212774,
"grad_norm": 0.36014947295188904,
"learning_rate": 9.979421993079852e-05,
"loss": 1.6632,
"num_input_tokens_seen": 33292288,
"step": 127
},
{
"epoch": 0.028789923526765633,
"grad_norm": 0.3196695148944855,
"learning_rate": 9.977809823015401e-05,
"loss": 1.5466,
"num_input_tokens_seen": 33554432,
"step": 128
},
{
"epoch": 0.029014844804318488,
"grad_norm": 0.9590062499046326,
"learning_rate": 9.976136999909156e-05,
"loss": 1.8428,
"num_input_tokens_seen": 33816576,
"step": 129
},
{
"epoch": 0.029239766081871343,
"grad_norm": 0.3693449795246124,
"learning_rate": 9.974403544143941e-05,
"loss": 1.145,
"num_input_tokens_seen": 34078720,
"step": 130
},
{
"epoch": 0.029464687359424202,
"grad_norm": 0.3786580264568329,
"learning_rate": 9.972609476841367e-05,
"loss": 1.3506,
"num_input_tokens_seen": 34340864,
"step": 131
},
{
"epoch": 0.029689608636977057,
"grad_norm": 0.5296582579612732,
"learning_rate": 9.970754819861577e-05,
"loss": 1.8347,
"num_input_tokens_seen": 34603008,
"step": 132
},
{
"epoch": 0.029914529914529916,
"grad_norm": 1.3651492595672607,
"learning_rate": 9.968839595802982e-05,
"loss": 1.3145,
"num_input_tokens_seen": 34865152,
"step": 133
},
{
"epoch": 0.03013945119208277,
"grad_norm": 0.4333643317222595,
"learning_rate": 9.966863828001982e-05,
"loss": 1.6935,
"num_input_tokens_seen": 35127296,
"step": 134
},
{
"epoch": 0.030364372469635626,
"grad_norm": 0.33044588565826416,
"learning_rate": 9.964827540532685e-05,
"loss": 1.0532,
"num_input_tokens_seen": 35389440,
"step": 135
},
{
"epoch": 0.030589293747188485,
"grad_norm": 0.4441980719566345,
"learning_rate": 9.962730758206611e-05,
"loss": 1.5709,
"num_input_tokens_seen": 35651584,
"step": 136
},
{
"epoch": 0.03081421502474134,
"grad_norm": 0.7452576160430908,
"learning_rate": 9.96057350657239e-05,
"loss": 1.378,
"num_input_tokens_seen": 35913728,
"step": 137
},
{
"epoch": 0.0310391363022942,
"grad_norm": 0.4860914945602417,
"learning_rate": 9.958355811915451e-05,
"loss": 1.6672,
"num_input_tokens_seen": 36175872,
"step": 138
},
{
"epoch": 0.03126405757984705,
"grad_norm": 0.35408177971839905,
"learning_rate": 9.956077701257709e-05,
"loss": 1.4359,
"num_input_tokens_seen": 36438016,
"step": 139
},
{
"epoch": 0.03148897885739991,
"grad_norm": 0.4450446665287018,
"learning_rate": 9.953739202357218e-05,
"loss": 1.8872,
"num_input_tokens_seen": 36700160,
"step": 140
},
{
"epoch": 0.03171390013495277,
"grad_norm": 0.3895055949687958,
"learning_rate": 9.951340343707852e-05,
"loss": 1.7616,
"num_input_tokens_seen": 36962304,
"step": 141
},
{
"epoch": 0.031938821412505626,
"grad_norm": 0.32615330815315247,
"learning_rate": 9.948881154538945e-05,
"loss": 1.669,
"num_input_tokens_seen": 37224448,
"step": 142
},
{
"epoch": 0.03216374269005848,
"grad_norm": 0.5241135954856873,
"learning_rate": 9.946361664814943e-05,
"loss": 2.1203,
"num_input_tokens_seen": 37486592,
"step": 143
},
{
"epoch": 0.032388663967611336,
"grad_norm": 0.36470967531204224,
"learning_rate": 9.94378190523503e-05,
"loss": 1.2967,
"num_input_tokens_seen": 37748736,
"step": 144
},
{
"epoch": 0.032613585245164195,
"grad_norm": 0.39427393674850464,
"learning_rate": 9.941141907232765e-05,
"loss": 1.8811,
"num_input_tokens_seen": 38010880,
"step": 145
},
{
"epoch": 0.03283850652271705,
"grad_norm": 0.42225828766822815,
"learning_rate": 9.938441702975689e-05,
"loss": 1.4219,
"num_input_tokens_seen": 38273024,
"step": 146
},
{
"epoch": 0.033063427800269905,
"grad_norm": 0.2807534337043762,
"learning_rate": 9.93568132536494e-05,
"loss": 1.3973,
"num_input_tokens_seen": 38535168,
"step": 147
},
{
"epoch": 0.033288349077822764,
"grad_norm": 0.3927517533302307,
"learning_rate": 9.932860808034848e-05,
"loss": 1.2116,
"num_input_tokens_seen": 38797312,
"step": 148
},
{
"epoch": 0.033513270355375616,
"grad_norm": 0.38540998101234436,
"learning_rate": 9.929980185352526e-05,
"loss": 1.3159,
"num_input_tokens_seen": 39059456,
"step": 149
},
{
"epoch": 0.033738191632928474,
"grad_norm": 0.3624691963195801,
"learning_rate": 9.927039492417452e-05,
"loss": 1.5602,
"num_input_tokens_seen": 39321600,
"step": 150
},
{
"epoch": 0.03396311291048133,
"grad_norm": 0.3228714168071747,
"learning_rate": 9.924038765061042e-05,
"loss": 1.6376,
"num_input_tokens_seen": 39583744,
"step": 151
},
{
"epoch": 0.03418803418803419,
"grad_norm": 0.33287134766578674,
"learning_rate": 9.92097803984621e-05,
"loss": 1.565,
"num_input_tokens_seen": 39845888,
"step": 152
},
{
"epoch": 0.03441295546558704,
"grad_norm": 1.5335997343063354,
"learning_rate": 9.917857354066931e-05,
"loss": 1.97,
"num_input_tokens_seen": 40108032,
"step": 153
},
{
"epoch": 0.0346378767431399,
"grad_norm": 0.36973464488983154,
"learning_rate": 9.914676745747772e-05,
"loss": 2.0821,
"num_input_tokens_seen": 40370176,
"step": 154
},
{
"epoch": 0.03486279802069276,
"grad_norm": 0.3719539940357208,
"learning_rate": 9.911436253643445e-05,
"loss": 2.0158,
"num_input_tokens_seen": 40632320,
"step": 155
},
{
"epoch": 0.03508771929824561,
"grad_norm": 0.4657575488090515,
"learning_rate": 9.908135917238321e-05,
"loss": 1.6059,
"num_input_tokens_seen": 40894464,
"step": 156
},
{
"epoch": 0.03531264057579847,
"grad_norm": 0.3838348090648651,
"learning_rate": 9.904775776745958e-05,
"loss": 1.7565,
"num_input_tokens_seen": 41156608,
"step": 157
},
{
"epoch": 0.03553756185335133,
"grad_norm": 0.29375481605529785,
"learning_rate": 9.901355873108609e-05,
"loss": 1.4024,
"num_input_tokens_seen": 41418752,
"step": 158
},
{
"epoch": 0.03576248313090418,
"grad_norm": 0.30352264642715454,
"learning_rate": 9.89787624799672e-05,
"loss": 1.5428,
"num_input_tokens_seen": 41680896,
"step": 159
},
{
"epoch": 0.03598740440845704,
"grad_norm": 0.49917876720428467,
"learning_rate": 9.894336943808426e-05,
"loss": 1.9028,
"num_input_tokens_seen": 41943040,
"step": 160
},
{
"epoch": 0.0362123256860099,
"grad_norm": 0.4412113428115845,
"learning_rate": 9.890738003669029e-05,
"loss": 1.9102,
"num_input_tokens_seen": 42205184,
"step": 161
},
{
"epoch": 0.03643724696356275,
"grad_norm": 0.5141309499740601,
"learning_rate": 9.88707947143048e-05,
"loss": 1.961,
"num_input_tokens_seen": 42467328,
"step": 162
},
{
"epoch": 0.03666216824111561,
"grad_norm": 0.423850953578949,
"learning_rate": 9.88336139167084e-05,
"loss": 1.4627,
"num_input_tokens_seen": 42729472,
"step": 163
},
{
"epoch": 0.03688708951866847,
"grad_norm": 0.2972734570503235,
"learning_rate": 9.879583809693738e-05,
"loss": 1.5094,
"num_input_tokens_seen": 42991616,
"step": 164
},
{
"epoch": 0.037112010796221326,
"grad_norm": 0.32651349902153015,
"learning_rate": 9.875746771527816e-05,
"loss": 1.2833,
"num_input_tokens_seen": 43253760,
"step": 165
},
{
"epoch": 0.03733693207377418,
"grad_norm": 0.41867563128471375,
"learning_rate": 9.871850323926177e-05,
"loss": 1.5707,
"num_input_tokens_seen": 43515904,
"step": 166
},
{
"epoch": 0.037561853351327036,
"grad_norm": 0.2959195375442505,
"learning_rate": 9.867894514365802e-05,
"loss": 1.092,
"num_input_tokens_seen": 43778048,
"step": 167
},
{
"epoch": 0.037786774628879895,
"grad_norm": 0.25508689880371094,
"learning_rate": 9.863879391046984e-05,
"loss": 1.1717,
"num_input_tokens_seen": 44040192,
"step": 168
},
{
"epoch": 0.038011695906432746,
"grad_norm": 0.2833406925201416,
"learning_rate": 9.859805002892732e-05,
"loss": 1.553,
"num_input_tokens_seen": 44302336,
"step": 169
},
{
"epoch": 0.038236617183985605,
"grad_norm": 0.3006632328033447,
"learning_rate": 9.855671399548181e-05,
"loss": 1.2807,
"num_input_tokens_seen": 44564480,
"step": 170
},
{
"epoch": 0.038461538461538464,
"grad_norm": 0.37500298023223877,
"learning_rate": 9.851478631379982e-05,
"loss": 1.3634,
"num_input_tokens_seen": 44826624,
"step": 171
},
{
"epoch": 0.038686459739091315,
"grad_norm": 0.4075554311275482,
"learning_rate": 9.847226749475695e-05,
"loss": 1.3434,
"num_input_tokens_seen": 45088768,
"step": 172
},
{
"epoch": 0.038911381016644174,
"grad_norm": 0.5580770969390869,
"learning_rate": 9.842915805643155e-05,
"loss": 1.6386,
"num_input_tokens_seen": 45350912,
"step": 173
},
{
"epoch": 0.03913630229419703,
"grad_norm": 0.2714264392852783,
"learning_rate": 9.838545852409857e-05,
"loss": 1.3893,
"num_input_tokens_seen": 45613056,
"step": 174
},
{
"epoch": 0.039361223571749884,
"grad_norm": 0.2713785171508789,
"learning_rate": 9.834116943022298e-05,
"loss": 1.3902,
"num_input_tokens_seen": 45875200,
"step": 175
},
{
"epoch": 0.03958614484930274,
"grad_norm": 0.38365769386291504,
"learning_rate": 9.829629131445342e-05,
"loss": 2.0312,
"num_input_tokens_seen": 46137344,
"step": 176
},
{
"epoch": 0.0398110661268556,
"grad_norm": 0.3616935610771179,
"learning_rate": 9.825082472361557e-05,
"loss": 1.8365,
"num_input_tokens_seen": 46399488,
"step": 177
},
{
"epoch": 0.04003598740440846,
"grad_norm": 0.3607868254184723,
"learning_rate": 9.820477021170551e-05,
"loss": 1.4036,
"num_input_tokens_seen": 46661632,
"step": 178
},
{
"epoch": 0.04026090868196131,
"grad_norm": 0.3717537224292755,
"learning_rate": 9.815812833988291e-05,
"loss": 1.7118,
"num_input_tokens_seen": 46923776,
"step": 179
},
{
"epoch": 0.04048582995951417,
"grad_norm": 0.30048802495002747,
"learning_rate": 9.811089967646428e-05,
"loss": 1.6556,
"num_input_tokens_seen": 47185920,
"step": 180
},
{
"epoch": 0.04071075123706703,
"grad_norm": 0.32800036668777466,
"learning_rate": 9.806308479691595e-05,
"loss": 1.1805,
"num_input_tokens_seen": 47448064,
"step": 181
},
{
"epoch": 0.04093567251461988,
"grad_norm": 0.30880051851272583,
"learning_rate": 9.801468428384716e-05,
"loss": 1.2028,
"num_input_tokens_seen": 47710208,
"step": 182
},
{
"epoch": 0.04116059379217274,
"grad_norm": 0.27152401208877563,
"learning_rate": 9.796569872700288e-05,
"loss": 1.5587,
"num_input_tokens_seen": 47972352,
"step": 183
},
{
"epoch": 0.0413855150697256,
"grad_norm": 0.3760753870010376,
"learning_rate": 9.791612872325667e-05,
"loss": 1.9057,
"num_input_tokens_seen": 48234496,
"step": 184
},
{
"epoch": 0.04161043634727845,
"grad_norm": 0.31716543436050415,
"learning_rate": 9.786597487660337e-05,
"loss": 1.7082,
"num_input_tokens_seen": 48496640,
"step": 185
},
{
"epoch": 0.04183535762483131,
"grad_norm": 0.26593077182769775,
"learning_rate": 9.781523779815179e-05,
"loss": 1.1768,
"num_input_tokens_seen": 48758784,
"step": 186
},
{
"epoch": 0.04206027890238417,
"grad_norm": 0.3927631080150604,
"learning_rate": 9.776391810611718e-05,
"loss": 0.8201,
"num_input_tokens_seen": 49020928,
"step": 187
},
{
"epoch": 0.04228520017993702,
"grad_norm": 0.43734124302864075,
"learning_rate": 9.771201642581385e-05,
"loss": 1.6303,
"num_input_tokens_seen": 49283072,
"step": 188
},
{
"epoch": 0.04251012145748988,
"grad_norm": 0.2875794768333435,
"learning_rate": 9.765953338964735e-05,
"loss": 1.373,
"num_input_tokens_seen": 49545216,
"step": 189
},
{
"epoch": 0.042735042735042736,
"grad_norm": 0.2745586037635803,
"learning_rate": 9.760646963710694e-05,
"loss": 1.1446,
"num_input_tokens_seen": 49807360,
"step": 190
},
{
"epoch": 0.042959964012595594,
"grad_norm": 0.5311657786369324,
"learning_rate": 9.755282581475769e-05,
"loss": 1.8471,
"num_input_tokens_seen": 50069504,
"step": 191
},
{
"epoch": 0.043184885290148446,
"grad_norm": 0.33446601033210754,
"learning_rate": 9.749860257623263e-05,
"loss": 1.0208,
"num_input_tokens_seen": 50331648,
"step": 192
},
{
"epoch": 0.043409806567701305,
"grad_norm": 0.40774935483932495,
"learning_rate": 9.744380058222483e-05,
"loss": 1.8295,
"num_input_tokens_seen": 50593792,
"step": 193
},
{
"epoch": 0.04363472784525416,
"grad_norm": 0.41755542159080505,
"learning_rate": 9.73884205004793e-05,
"loss": 1.6565,
"num_input_tokens_seen": 50855936,
"step": 194
},
{
"epoch": 0.043859649122807015,
"grad_norm": 0.3205357789993286,
"learning_rate": 9.733246300578483e-05,
"loss": 1.3403,
"num_input_tokens_seen": 51118080,
"step": 195
},
{
"epoch": 0.044084570400359874,
"grad_norm": 0.3782586455345154,
"learning_rate": 9.727592877996585e-05,
"loss": 1.7666,
"num_input_tokens_seen": 51380224,
"step": 196
},
{
"epoch": 0.04430949167791273,
"grad_norm": 0.2624562978744507,
"learning_rate": 9.721881851187406e-05,
"loss": 1.2044,
"num_input_tokens_seen": 51642368,
"step": 197
},
{
"epoch": 0.044534412955465584,
"grad_norm": 0.38200780749320984,
"learning_rate": 9.716113289738004e-05,
"loss": 1.6842,
"num_input_tokens_seen": 51904512,
"step": 198
},
{
"epoch": 0.04475933423301844,
"grad_norm": 0.5644131898880005,
"learning_rate": 9.710287263936484e-05,
"loss": 1.9484,
"num_input_tokens_seen": 52166656,
"step": 199
},
{
"epoch": 0.0449842555105713,
"grad_norm": 0.8398867845535278,
"learning_rate": 9.704403844771128e-05,
"loss": 1.2177,
"num_input_tokens_seen": 52428800,
"step": 200
},
{
"epoch": 0.04520917678812416,
"grad_norm": 0.33354583382606506,
"learning_rate": 9.698463103929542e-05,
"loss": 1.4938,
"num_input_tokens_seen": 52690944,
"step": 201
},
{
"epoch": 0.04543409806567701,
"grad_norm": 0.44575589895248413,
"learning_rate": 9.69246511379778e-05,
"loss": 1.5627,
"num_input_tokens_seen": 52953088,
"step": 202
},
{
"epoch": 0.04565901934322987,
"grad_norm": 0.2522154450416565,
"learning_rate": 9.686409947459458e-05,
"loss": 1.3527,
"num_input_tokens_seen": 53215232,
"step": 203
},
{
"epoch": 0.04588394062078273,
"grad_norm": 0.380891352891922,
"learning_rate": 9.680297678694867e-05,
"loss": 1.4753,
"num_input_tokens_seen": 53477376,
"step": 204
},
{
"epoch": 0.04610886189833558,
"grad_norm": 0.40885552763938904,
"learning_rate": 9.674128381980072e-05,
"loss": 2.038,
"num_input_tokens_seen": 53739520,
"step": 205
},
{
"epoch": 0.04633378317588844,
"grad_norm": 0.434465229511261,
"learning_rate": 9.667902132486009e-05,
"loss": 1.2983,
"num_input_tokens_seen": 54001664,
"step": 206
},
{
"epoch": 0.0465587044534413,
"grad_norm": 0.45052337646484375,
"learning_rate": 9.661619006077561e-05,
"loss": 1.6453,
"num_input_tokens_seen": 54263808,
"step": 207
},
{
"epoch": 0.04678362573099415,
"grad_norm": 0.3216852843761444,
"learning_rate": 9.655279079312642e-05,
"loss": 2.0549,
"num_input_tokens_seen": 54525952,
"step": 208
},
{
"epoch": 0.04700854700854701,
"grad_norm": 0.2805038094520569,
"learning_rate": 9.648882429441257e-05,
"loss": 1.3867,
"num_input_tokens_seen": 54788096,
"step": 209
},
{
"epoch": 0.04723346828609987,
"grad_norm": 0.34908825159072876,
"learning_rate": 9.642429134404569e-05,
"loss": 1.3238,
"num_input_tokens_seen": 55050240,
"step": 210
},
{
"epoch": 0.04745838956365272,
"grad_norm": 0.42375364899635315,
"learning_rate": 9.635919272833938e-05,
"loss": 1.3649,
"num_input_tokens_seen": 55312384,
"step": 211
},
{
"epoch": 0.04768331084120558,
"grad_norm": 0.300839364528656,
"learning_rate": 9.629352924049975e-05,
"loss": 1.7611,
"num_input_tokens_seen": 55574528,
"step": 212
},
{
"epoch": 0.047908232118758436,
"grad_norm": 0.2923290431499481,
"learning_rate": 9.622730168061567e-05,
"loss": 1.8438,
"num_input_tokens_seen": 55836672,
"step": 213
},
{
"epoch": 0.048133153396311294,
"grad_norm": 0.28767311573028564,
"learning_rate": 9.616051085564906e-05,
"loss": 1.3909,
"num_input_tokens_seen": 56098816,
"step": 214
},
{
"epoch": 0.048358074673864146,
"grad_norm": 0.3666403293609619,
"learning_rate": 9.609315757942503e-05,
"loss": 1.6141,
"num_input_tokens_seen": 56360960,
"step": 215
},
{
"epoch": 0.048582995951417005,
"grad_norm": 0.3564029932022095,
"learning_rate": 9.602524267262203e-05,
"loss": 1.5816,
"num_input_tokens_seen": 56623104,
"step": 216
},
{
"epoch": 0.04880791722896986,
"grad_norm": 0.3815251588821411,
"learning_rate": 9.595676696276172e-05,
"loss": 0.868,
"num_input_tokens_seen": 56885248,
"step": 217
},
{
"epoch": 0.049032838506522715,
"grad_norm": 0.4164994955062866,
"learning_rate": 9.588773128419906e-05,
"loss": 1.5668,
"num_input_tokens_seen": 57147392,
"step": 218
},
{
"epoch": 0.049257759784075573,
"grad_norm": 0.46035948395729065,
"learning_rate": 9.581813647811198e-05,
"loss": 1.7141,
"num_input_tokens_seen": 57409536,
"step": 219
},
{
"epoch": 0.04948268106162843,
"grad_norm": 0.27215978503227234,
"learning_rate": 9.574798339249125e-05,
"loss": 1.258,
"num_input_tokens_seen": 57671680,
"step": 220
},
{
"epoch": 0.049707602339181284,
"grad_norm": 0.2921265959739685,
"learning_rate": 9.567727288213005e-05,
"loss": 1.3348,
"num_input_tokens_seen": 57933824,
"step": 221
},
{
"epoch": 0.04993252361673414,
"grad_norm": 0.3707129955291748,
"learning_rate": 9.560600580861365e-05,
"loss": 1.6,
"num_input_tokens_seen": 58195968,
"step": 222
},
{
"epoch": 0.050157444894287,
"grad_norm": 0.2560347616672516,
"learning_rate": 9.553418304030886e-05,
"loss": 1.0974,
"num_input_tokens_seen": 58458112,
"step": 223
},
{
"epoch": 0.05038236617183985,
"grad_norm": 0.5279447436332703,
"learning_rate": 9.546180545235344e-05,
"loss": 1.5502,
"num_input_tokens_seen": 58720256,
"step": 224
},
{
"epoch": 0.05060728744939271,
"grad_norm": 0.7471780776977539,
"learning_rate": 9.538887392664544e-05,
"loss": 1.6109,
"num_input_tokens_seen": 58982400,
"step": 225
},
{
"epoch": 0.05083220872694557,
"grad_norm": 0.37901559472084045,
"learning_rate": 9.53153893518325e-05,
"loss": 1.4102,
"num_input_tokens_seen": 59244544,
"step": 226
},
{
"epoch": 0.05105713000449843,
"grad_norm": 0.359701007604599,
"learning_rate": 9.524135262330098e-05,
"loss": 1.6576,
"num_input_tokens_seen": 59506688,
"step": 227
},
{
"epoch": 0.05128205128205128,
"grad_norm": 0.5791448354721069,
"learning_rate": 9.516676464316505e-05,
"loss": 1.618,
"num_input_tokens_seen": 59768832,
"step": 228
},
{
"epoch": 0.05150697255960414,
"grad_norm": 0.37227171659469604,
"learning_rate": 9.50916263202557e-05,
"loss": 1.6593,
"num_input_tokens_seen": 60030976,
"step": 229
},
{
"epoch": 0.051731893837157,
"grad_norm": 0.3083772659301758,
"learning_rate": 9.501593857010969e-05,
"loss": 1.5161,
"num_input_tokens_seen": 60293120,
"step": 230
},
{
"epoch": 0.05195681511470985,
"grad_norm": 0.4033567011356354,
"learning_rate": 9.493970231495835e-05,
"loss": 1.4406,
"num_input_tokens_seen": 60555264,
"step": 231
},
{
"epoch": 0.05218173639226271,
"grad_norm": 0.6083555221557617,
"learning_rate": 9.486291848371643e-05,
"loss": 1.9704,
"num_input_tokens_seen": 60817408,
"step": 232
},
{
"epoch": 0.052406657669815566,
"grad_norm": 0.3136764168739319,
"learning_rate": 9.478558801197065e-05,
"loss": 1.7413,
"num_input_tokens_seen": 61079552,
"step": 233
},
{
"epoch": 0.05263157894736842,
"grad_norm": 0.34223246574401855,
"learning_rate": 9.47077118419684e-05,
"loss": 1.5267,
"num_input_tokens_seen": 61341696,
"step": 234
},
{
"epoch": 0.05285650022492128,
"grad_norm": 0.41066691279411316,
"learning_rate": 9.462929092260628e-05,
"loss": 1.6758,
"num_input_tokens_seen": 61603840,
"step": 235
},
{
"epoch": 0.053081421502474135,
"grad_norm": 0.27612629532814026,
"learning_rate": 9.45503262094184e-05,
"loss": 1.3576,
"num_input_tokens_seen": 61865984,
"step": 236
},
{
"epoch": 0.053306342780026994,
"grad_norm": 0.30329903960227966,
"learning_rate": 9.447081866456489e-05,
"loss": 1.3036,
"num_input_tokens_seen": 62128128,
"step": 237
},
{
"epoch": 0.053531264057579846,
"grad_norm": 0.5270364880561829,
"learning_rate": 9.439076925682006e-05,
"loss": 1.6004,
"num_input_tokens_seen": 62390272,
"step": 238
},
{
"epoch": 0.053756185335132704,
"grad_norm": 0.4517534375190735,
"learning_rate": 9.431017896156074e-05,
"loss": 1.8651,
"num_input_tokens_seen": 62652416,
"step": 239
},
{
"epoch": 0.05398110661268556,
"grad_norm": 0.3612141013145447,
"learning_rate": 9.42290487607542e-05,
"loss": 1.8875,
"num_input_tokens_seen": 62914560,
"step": 240
},
{
"epoch": 0.054206027890238415,
"grad_norm": 0.3406079411506653,
"learning_rate": 9.414737964294636e-05,
"loss": 1.179,
"num_input_tokens_seen": 63176704,
"step": 241
},
{
"epoch": 0.05443094916779127,
"grad_norm": 0.2817097306251526,
"learning_rate": 9.40651726032496e-05,
"loss": 1.1935,
"num_input_tokens_seen": 63438848,
"step": 242
},
{
"epoch": 0.05465587044534413,
"grad_norm": 0.3395964205265045,
"learning_rate": 9.398242864333083e-05,
"loss": 1.3751,
"num_input_tokens_seen": 63700992,
"step": 243
},
{
"epoch": 0.054880791722896984,
"grad_norm": 0.35885000228881836,
"learning_rate": 9.389914877139903e-05,
"loss": 1.6456,
"num_input_tokens_seen": 63963136,
"step": 244
},
{
"epoch": 0.05510571300044984,
"grad_norm": 0.35253003239631653,
"learning_rate": 9.381533400219318e-05,
"loss": 1.2927,
"num_input_tokens_seen": 64225280,
"step": 245
},
{
"epoch": 0.0553306342780027,
"grad_norm": 0.40748098492622375,
"learning_rate": 9.373098535696979e-05,
"loss": 1.6064,
"num_input_tokens_seen": 64487424,
"step": 246
},
{
"epoch": 0.05555555555555555,
"grad_norm": 0.41558191180229187,
"learning_rate": 9.364610386349049e-05,
"loss": 1.2848,
"num_input_tokens_seen": 64749568,
"step": 247
},
{
"epoch": 0.05578047683310841,
"grad_norm": 0.47047188878059387,
"learning_rate": 9.356069055600948e-05,
"loss": 2.0327,
"num_input_tokens_seen": 65011712,
"step": 248
},
{
"epoch": 0.05600539811066127,
"grad_norm": 0.3283202648162842,
"learning_rate": 9.347474647526095e-05,
"loss": 1.6587,
"num_input_tokens_seen": 65273856,
"step": 249
},
{
"epoch": 0.05623031938821413,
"grad_norm": 0.36125075817108154,
"learning_rate": 9.338827266844644e-05,
"loss": 1.5436,
"num_input_tokens_seen": 65536000,
"step": 250
},
{
"epoch": 0.05645524066576698,
"grad_norm": 0.5791629552841187,
"learning_rate": 9.330127018922194e-05,
"loss": 2.1172,
"num_input_tokens_seen": 65798144,
"step": 251
},
{
"epoch": 0.05668016194331984,
"grad_norm": 0.28609782457351685,
"learning_rate": 9.321374009768525e-05,
"loss": 1.2856,
"num_input_tokens_seen": 66060288,
"step": 252
},
{
"epoch": 0.0569050832208727,
"grad_norm": 0.31573817133903503,
"learning_rate": 9.312568346036288e-05,
"loss": 1.0411,
"num_input_tokens_seen": 66322432,
"step": 253
},
{
"epoch": 0.05713000449842555,
"grad_norm": 0.3822273910045624,
"learning_rate": 9.30371013501972e-05,
"loss": 2.061,
"num_input_tokens_seen": 66584576,
"step": 254
},
{
"epoch": 0.05735492577597841,
"grad_norm": 0.2760697901248932,
"learning_rate": 9.294799484653323e-05,
"loss": 1.1583,
"num_input_tokens_seen": 66846720,
"step": 255
},
{
"epoch": 0.057579847053531266,
"grad_norm": 0.2954143285751343,
"learning_rate": 9.285836503510562e-05,
"loss": 1.4169,
"num_input_tokens_seen": 67108864,
"step": 256
},
{
"epoch": 0.05780476833108412,
"grad_norm": 0.41817978024482727,
"learning_rate": 9.276821300802534e-05,
"loss": 1.6938,
"num_input_tokens_seen": 67371008,
"step": 257
},
{
"epoch": 0.058029689608636977,
"grad_norm": 0.31263795495033264,
"learning_rate": 9.267753986376637e-05,
"loss": 1.3342,
"num_input_tokens_seen": 67633152,
"step": 258
},
{
"epoch": 0.058254610886189835,
"grad_norm": 0.35715246200561523,
"learning_rate": 9.258634670715238e-05,
"loss": 1.6498,
"num_input_tokens_seen": 67895296,
"step": 259
},
{
"epoch": 0.05847953216374269,
"grad_norm": 0.29079005122184753,
"learning_rate": 9.249463464934321e-05,
"loss": 1.745,
"num_input_tokens_seen": 68157440,
"step": 260
},
{
"epoch": 0.058704453441295545,
"grad_norm": 0.27173885703086853,
"learning_rate": 9.24024048078213e-05,
"loss": 1.1948,
"num_input_tokens_seen": 68419584,
"step": 261
},
{
"epoch": 0.058929374718848404,
"grad_norm": 0.5002535581588745,
"learning_rate": 9.230965830637821e-05,
"loss": 1.5592,
"num_input_tokens_seen": 68681728,
"step": 262
},
{
"epoch": 0.05915429599640126,
"grad_norm": 0.38698941469192505,
"learning_rate": 9.221639627510076e-05,
"loss": 1.9042,
"num_input_tokens_seen": 68943872,
"step": 263
},
{
"epoch": 0.059379217273954114,
"grad_norm": 0.4194246828556061,
"learning_rate": 9.212261985035739e-05,
"loss": 1.7498,
"num_input_tokens_seen": 69206016,
"step": 264
},
{
"epoch": 0.05960413855150697,
"grad_norm": 0.4946896433830261,
"learning_rate": 9.202833017478422e-05,
"loss": 1.4112,
"num_input_tokens_seen": 69468160,
"step": 265
},
{
"epoch": 0.05982905982905983,
"grad_norm": 0.3843882083892822,
"learning_rate": 9.193352839727121e-05,
"loss": 1.5777,
"num_input_tokens_seen": 69730304,
"step": 266
},
{
"epoch": 0.06005398110661268,
"grad_norm": 0.4122903048992157,
"learning_rate": 9.183821567294809e-05,
"loss": 1.6184,
"num_input_tokens_seen": 69992448,
"step": 267
},
{
"epoch": 0.06027890238416554,
"grad_norm": 0.3298034071922302,
"learning_rate": 9.174239316317033e-05,
"loss": 1.3252,
"num_input_tokens_seen": 70254592,
"step": 268
},
{
"epoch": 0.0605038236617184,
"grad_norm": 0.40769994258880615,
"learning_rate": 9.164606203550497e-05,
"loss": 1.8223,
"num_input_tokens_seen": 70516736,
"step": 269
},
{
"epoch": 0.06072874493927125,
"grad_norm": 0.6295496821403503,
"learning_rate": 9.154922346371642e-05,
"loss": 1.9614,
"num_input_tokens_seen": 70778880,
"step": 270
},
{
"epoch": 0.06095366621682411,
"grad_norm": 1.0290013551712036,
"learning_rate": 9.145187862775209e-05,
"loss": 1.3658,
"num_input_tokens_seen": 71041024,
"step": 271
},
{
"epoch": 0.06117858749437697,
"grad_norm": 0.3666732907295227,
"learning_rate": 9.135402871372808e-05,
"loss": 1.1264,
"num_input_tokens_seen": 71303168,
"step": 272
},
{
"epoch": 0.06140350877192982,
"grad_norm": 0.35897478461265564,
"learning_rate": 9.125567491391476e-05,
"loss": 1.8494,
"num_input_tokens_seen": 71565312,
"step": 273
},
{
"epoch": 0.06162843004948268,
"grad_norm": 0.3326916992664337,
"learning_rate": 9.11568184267221e-05,
"loss": 1.3847,
"num_input_tokens_seen": 71827456,
"step": 274
},
{
"epoch": 0.06185335132703554,
"grad_norm": 0.5763710737228394,
"learning_rate": 9.105746045668521e-05,
"loss": 1.7974,
"num_input_tokens_seen": 72089600,
"step": 275
},
{
"epoch": 0.0620782726045884,
"grad_norm": 0.2818114757537842,
"learning_rate": 9.09576022144496e-05,
"loss": 1.096,
"num_input_tokens_seen": 72351744,
"step": 276
},
{
"epoch": 0.06230319388214125,
"grad_norm": 0.28481417894363403,
"learning_rate": 9.085724491675642e-05,
"loss": 1.5213,
"num_input_tokens_seen": 72613888,
"step": 277
},
{
"epoch": 0.0625281151596941,
"grad_norm": 0.42331910133361816,
"learning_rate": 9.075638978642771e-05,
"loss": 1.3263,
"num_input_tokens_seen": 72876032,
"step": 278
},
{
"epoch": 0.06275303643724696,
"grad_norm": 0.3151121437549591,
"learning_rate": 9.065503805235138e-05,
"loss": 1.7154,
"num_input_tokens_seen": 73138176,
"step": 279
},
{
"epoch": 0.06297795771479982,
"grad_norm": 0.3964519202709198,
"learning_rate": 9.055319094946633e-05,
"loss": 1.4097,
"num_input_tokens_seen": 73400320,
"step": 280
},
{
"epoch": 0.06320287899235268,
"grad_norm": 0.47799408435821533,
"learning_rate": 9.045084971874738e-05,
"loss": 1.468,
"num_input_tokens_seen": 73662464,
"step": 281
},
{
"epoch": 0.06342780026990553,
"grad_norm": 0.30549415946006775,
"learning_rate": 9.034801560719011e-05,
"loss": 1.5141,
"num_input_tokens_seen": 73924608,
"step": 282
},
{
"epoch": 0.0636527215474584,
"grad_norm": 0.34683847427368164,
"learning_rate": 9.02446898677957e-05,
"loss": 1.5819,
"num_input_tokens_seen": 74186752,
"step": 283
},
{
"epoch": 0.06387764282501125,
"grad_norm": 0.36312419176101685,
"learning_rate": 9.014087375955573e-05,
"loss": 1.5719,
"num_input_tokens_seen": 74448896,
"step": 284
},
{
"epoch": 0.0641025641025641,
"grad_norm": 0.3092777132987976,
"learning_rate": 9.003656854743667e-05,
"loss": 1.5536,
"num_input_tokens_seen": 74711040,
"step": 285
},
{
"epoch": 0.06432748538011696,
"grad_norm": 0.32090455293655396,
"learning_rate": 8.993177550236464e-05,
"loss": 1.4896,
"num_input_tokens_seen": 74973184,
"step": 286
},
{
"epoch": 0.06455240665766981,
"grad_norm": 0.5772414207458496,
"learning_rate": 8.982649590120982e-05,
"loss": 1.4706,
"num_input_tokens_seen": 75235328,
"step": 287
},
{
"epoch": 0.06477732793522267,
"grad_norm": 0.3795754611492157,
"learning_rate": 8.972073102677091e-05,
"loss": 1.6924,
"num_input_tokens_seen": 75497472,
"step": 288
},
{
"epoch": 0.06500224921277553,
"grad_norm": 0.3151102066040039,
"learning_rate": 8.961448216775954e-05,
"loss": 1.332,
"num_input_tokens_seen": 75759616,
"step": 289
},
{
"epoch": 0.06522717049032839,
"grad_norm": 0.36022424697875977,
"learning_rate": 8.950775061878453e-05,
"loss": 1.7878,
"num_input_tokens_seen": 76021760,
"step": 290
},
{
"epoch": 0.06545209176788123,
"grad_norm": 0.2842424809932709,
"learning_rate": 8.940053768033609e-05,
"loss": 0.8113,
"num_input_tokens_seen": 76283904,
"step": 291
},
{
"epoch": 0.0656770130454341,
"grad_norm": 0.31418102979660034,
"learning_rate": 8.92928446587701e-05,
"loss": 1.2891,
"num_input_tokens_seen": 76546048,
"step": 292
},
{
"epoch": 0.06590193432298695,
"grad_norm": 0.30106377601623535,
"learning_rate": 8.9184672866292e-05,
"loss": 1.4963,
"num_input_tokens_seen": 76808192,
"step": 293
},
{
"epoch": 0.06612685560053981,
"grad_norm": 0.3311130106449127,
"learning_rate": 8.907602362094094e-05,
"loss": 1.3054,
"num_input_tokens_seen": 77070336,
"step": 294
},
{
"epoch": 0.06635177687809267,
"grad_norm": 0.31675076484680176,
"learning_rate": 8.896689824657372e-05,
"loss": 1.4281,
"num_input_tokens_seen": 77332480,
"step": 295
},
{
"epoch": 0.06657669815564553,
"grad_norm": 0.37820813059806824,
"learning_rate": 8.885729807284856e-05,
"loss": 1.7518,
"num_input_tokens_seen": 77594624,
"step": 296
},
{
"epoch": 0.06680161943319839,
"grad_norm": 0.34154844284057617,
"learning_rate": 8.874722443520899e-05,
"loss": 1.5654,
"num_input_tokens_seen": 77856768,
"step": 297
},
{
"epoch": 0.06702654071075123,
"grad_norm": 0.6786977052688599,
"learning_rate": 8.863667867486756e-05,
"loss": 2.1024,
"num_input_tokens_seen": 78118912,
"step": 298
},
{
"epoch": 0.06725146198830409,
"grad_norm": 0.3483034372329712,
"learning_rate": 8.852566213878947e-05,
"loss": 1.5528,
"num_input_tokens_seen": 78381056,
"step": 299
},
{
"epoch": 0.06747638326585695,
"grad_norm": 0.3642946779727936,
"learning_rate": 8.841417617967618e-05,
"loss": 1.6005,
"num_input_tokens_seen": 78643200,
"step": 300
},
{
"epoch": 0.06770130454340981,
"grad_norm": 0.6624205112457275,
"learning_rate": 8.83022221559489e-05,
"loss": 1.7759,
"num_input_tokens_seen": 78905344,
"step": 301
},
{
"epoch": 0.06792622582096267,
"grad_norm": 0.39949551224708557,
"learning_rate": 8.818980143173213e-05,
"loss": 1.8293,
"num_input_tokens_seen": 79167488,
"step": 302
},
{
"epoch": 0.06815114709851552,
"grad_norm": 0.3008061647415161,
"learning_rate": 8.807691537683685e-05,
"loss": 0.9926,
"num_input_tokens_seen": 79429632,
"step": 303
},
{
"epoch": 0.06837606837606838,
"grad_norm": 0.36931100487709045,
"learning_rate": 8.796356536674403e-05,
"loss": 1.5408,
"num_input_tokens_seen": 79691776,
"step": 304
},
{
"epoch": 0.06860098965362123,
"grad_norm": 0.30574414134025574,
"learning_rate": 8.784975278258783e-05,
"loss": 1.5729,
"num_input_tokens_seen": 79953920,
"step": 305
},
{
"epoch": 0.06882591093117409,
"grad_norm": 0.30279430747032166,
"learning_rate": 8.773547901113862e-05,
"loss": 1.507,
"num_input_tokens_seen": 80216064,
"step": 306
},
{
"epoch": 0.06905083220872694,
"grad_norm": 0.4457983672618866,
"learning_rate": 8.762074544478623e-05,
"loss": 1.4149,
"num_input_tokens_seen": 80478208,
"step": 307
},
{
"epoch": 0.0692757534862798,
"grad_norm": 0.4079945683479309,
"learning_rate": 8.750555348152298e-05,
"loss": 1.6881,
"num_input_tokens_seen": 80740352,
"step": 308
},
{
"epoch": 0.06950067476383266,
"grad_norm": 0.34336355328559875,
"learning_rate": 8.73899045249266e-05,
"loss": 1.4145,
"num_input_tokens_seen": 81002496,
"step": 309
},
{
"epoch": 0.06972559604138552,
"grad_norm": 0.31312716007232666,
"learning_rate": 8.727379998414311e-05,
"loss": 1.744,
"num_input_tokens_seen": 81264640,
"step": 310
},
{
"epoch": 0.06995051731893837,
"grad_norm": 0.4761490225791931,
"learning_rate": 8.715724127386972e-05,
"loss": 1.5897,
"num_input_tokens_seen": 81526784,
"step": 311
},
{
"epoch": 0.07017543859649122,
"grad_norm": 0.29893723130226135,
"learning_rate": 8.70402298143375e-05,
"loss": 1.7682,
"num_input_tokens_seen": 81788928,
"step": 312
},
{
"epoch": 0.07040035987404408,
"grad_norm": 0.3314342796802521,
"learning_rate": 8.692276703129421e-05,
"loss": 1.5067,
"num_input_tokens_seen": 82051072,
"step": 313
},
{
"epoch": 0.07062528115159694,
"grad_norm": 0.24790574610233307,
"learning_rate": 8.680485435598673e-05,
"loss": 1.15,
"num_input_tokens_seen": 82313216,
"step": 314
},
{
"epoch": 0.0708502024291498,
"grad_norm": 0.2895430326461792,
"learning_rate": 8.668649322514382e-05,
"loss": 1.3509,
"num_input_tokens_seen": 82575360,
"step": 315
},
{
"epoch": 0.07107512370670266,
"grad_norm": 0.3589169681072235,
"learning_rate": 8.656768508095853e-05,
"loss": 1.8632,
"num_input_tokens_seen": 82837504,
"step": 316
},
{
"epoch": 0.07130004498425552,
"grad_norm": 0.3887437582015991,
"learning_rate": 8.644843137107059e-05,
"loss": 1.395,
"num_input_tokens_seen": 83099648,
"step": 317
},
{
"epoch": 0.07152496626180836,
"grad_norm": 0.37343311309814453,
"learning_rate": 8.63287335485488e-05,
"loss": 1.7117,
"num_input_tokens_seen": 83361792,
"step": 318
},
{
"epoch": 0.07174988753936122,
"grad_norm": 0.3545985519886017,
"learning_rate": 8.620859307187339e-05,
"loss": 1.9533,
"num_input_tokens_seen": 83623936,
"step": 319
},
{
"epoch": 0.07197480881691408,
"grad_norm": 0.38480547070503235,
"learning_rate": 8.608801140491811e-05,
"loss": 1.2177,
"num_input_tokens_seen": 83886080,
"step": 320
},
{
"epoch": 0.07219973009446694,
"grad_norm": 0.27238497138023376,
"learning_rate": 8.596699001693255e-05,
"loss": 1.7272,
"num_input_tokens_seen": 84148224,
"step": 321
},
{
"epoch": 0.0724246513720198,
"grad_norm": 0.3463023602962494,
"learning_rate": 8.584553038252414e-05,
"loss": 1.9201,
"num_input_tokens_seen": 84410368,
"step": 322
},
{
"epoch": 0.07264957264957266,
"grad_norm": 0.4262174665927887,
"learning_rate": 8.572363398164017e-05,
"loss": 1.8208,
"num_input_tokens_seen": 84672512,
"step": 323
},
{
"epoch": 0.0728744939271255,
"grad_norm": 0.2797011733055115,
"learning_rate": 8.560130229954984e-05,
"loss": 1.4258,
"num_input_tokens_seen": 84934656,
"step": 324
},
{
"epoch": 0.07309941520467836,
"grad_norm": 0.3334251940250397,
"learning_rate": 8.547853682682604e-05,
"loss": 1.0541,
"num_input_tokens_seen": 85196800,
"step": 325
},
{
"epoch": 0.07332433648223122,
"grad_norm": 0.2976478636264801,
"learning_rate": 8.535533905932738e-05,
"loss": 1.3189,
"num_input_tokens_seen": 85458944,
"step": 326
},
{
"epoch": 0.07354925775978408,
"grad_norm": 0.37758395075798035,
"learning_rate": 8.523171049817974e-05,
"loss": 1.5492,
"num_input_tokens_seen": 85721088,
"step": 327
},
{
"epoch": 0.07377417903733693,
"grad_norm": 0.3429315388202667,
"learning_rate": 8.510765264975813e-05,
"loss": 1.0107,
"num_input_tokens_seen": 85983232,
"step": 328
},
{
"epoch": 0.07399910031488979,
"grad_norm": 0.2990742027759552,
"learning_rate": 8.498316702566828e-05,
"loss": 1.4943,
"num_input_tokens_seen": 86245376,
"step": 329
},
{
"epoch": 0.07422402159244265,
"grad_norm": 0.32602354884147644,
"learning_rate": 8.485825514272824e-05,
"loss": 1.5812,
"num_input_tokens_seen": 86507520,
"step": 330
},
{
"epoch": 0.0744489428699955,
"grad_norm": 0.26363345980644226,
"learning_rate": 8.473291852294987e-05,
"loss": 1.3136,
"num_input_tokens_seen": 86769664,
"step": 331
},
{
"epoch": 0.07467386414754835,
"grad_norm": 0.2116805911064148,
"learning_rate": 8.460715869352035e-05,
"loss": 1.2319,
"num_input_tokens_seen": 87031808,
"step": 332
},
{
"epoch": 0.07489878542510121,
"grad_norm": 0.3987019956111908,
"learning_rate": 8.44809771867835e-05,
"loss": 1.5046,
"num_input_tokens_seen": 87293952,
"step": 333
},
{
"epoch": 0.07512370670265407,
"grad_norm": 0.3598376214504242,
"learning_rate": 8.435437554022115e-05,
"loss": 1.9093,
"num_input_tokens_seen": 87556096,
"step": 334
},
{
"epoch": 0.07534862798020693,
"grad_norm": 0.24704904854297638,
"learning_rate": 8.422735529643444e-05,
"loss": 1.0568,
"num_input_tokens_seen": 87818240,
"step": 335
},
{
"epoch": 0.07557354925775979,
"grad_norm": 0.4237552881240845,
"learning_rate": 8.409991800312493e-05,
"loss": 1.9913,
"num_input_tokens_seen": 88080384,
"step": 336
},
{
"epoch": 0.07579847053531263,
"grad_norm": 0.29798340797424316,
"learning_rate": 8.397206521307584e-05,
"loss": 1.1693,
"num_input_tokens_seen": 88342528,
"step": 337
},
{
"epoch": 0.07602339181286549,
"grad_norm": 0.2940386235713959,
"learning_rate": 8.384379848413304e-05,
"loss": 1.3721,
"num_input_tokens_seen": 88604672,
"step": 338
},
{
"epoch": 0.07624831309041835,
"grad_norm": 0.393265962600708,
"learning_rate": 8.371511937918616e-05,
"loss": 0.7533,
"num_input_tokens_seen": 88866816,
"step": 339
},
{
"epoch": 0.07647323436797121,
"grad_norm": 0.36260786652565,
"learning_rate": 8.358602946614951e-05,
"loss": 1.481,
"num_input_tokens_seen": 89128960,
"step": 340
},
{
"epoch": 0.07669815564552407,
"grad_norm": 0.3836679756641388,
"learning_rate": 8.345653031794292e-05,
"loss": 1.182,
"num_input_tokens_seen": 89391104,
"step": 341
},
{
"epoch": 0.07692307692307693,
"grad_norm": 0.3021415174007416,
"learning_rate": 8.332662351247262e-05,
"loss": 1.317,
"num_input_tokens_seen": 89653248,
"step": 342
},
{
"epoch": 0.07714799820062979,
"grad_norm": 0.3135571777820587,
"learning_rate": 8.319631063261209e-05,
"loss": 1.4725,
"num_input_tokens_seen": 89915392,
"step": 343
},
{
"epoch": 0.07737291947818263,
"grad_norm": 0.3524462580680847,
"learning_rate": 8.306559326618259e-05,
"loss": 1.8325,
"num_input_tokens_seen": 90177536,
"step": 344
},
{
"epoch": 0.07759784075573549,
"grad_norm": 0.3361789584159851,
"learning_rate": 8.293447300593402e-05,
"loss": 1.4591,
"num_input_tokens_seen": 90439680,
"step": 345
},
{
"epoch": 0.07782276203328835,
"grad_norm": 0.3886992335319519,
"learning_rate": 8.280295144952536e-05,
"loss": 1.2393,
"num_input_tokens_seen": 90701824,
"step": 346
},
{
"epoch": 0.0780476833108412,
"grad_norm": 0.28349190950393677,
"learning_rate": 8.267103019950529e-05,
"loss": 1.7775,
"num_input_tokens_seen": 90963968,
"step": 347
},
{
"epoch": 0.07827260458839407,
"grad_norm": 0.3164171874523163,
"learning_rate": 8.253871086329255e-05,
"loss": 1.268,
"num_input_tokens_seen": 91226112,
"step": 348
},
{
"epoch": 0.07849752586594692,
"grad_norm": 0.9120920300483704,
"learning_rate": 8.240599505315655e-05,
"loss": 1.5009,
"num_input_tokens_seen": 91488256,
"step": 349
},
{
"epoch": 0.07872244714349977,
"grad_norm": 0.398485392332077,
"learning_rate": 8.227288438619754e-05,
"loss": 1.5468,
"num_input_tokens_seen": 91750400,
"step": 350
},
{
"epoch": 0.07894736842105263,
"grad_norm": 0.27732348442077637,
"learning_rate": 8.213938048432697e-05,
"loss": 1.5131,
"num_input_tokens_seen": 92012544,
"step": 351
},
{
"epoch": 0.07917228969860549,
"grad_norm": 0.3714882731437683,
"learning_rate": 8.200548497424778e-05,
"loss": 1.2599,
"num_input_tokens_seen": 92274688,
"step": 352
},
{
"epoch": 0.07939721097615834,
"grad_norm": 0.48692452907562256,
"learning_rate": 8.18711994874345e-05,
"loss": 1.6234,
"num_input_tokens_seen": 92536832,
"step": 353
},
{
"epoch": 0.0796221322537112,
"grad_norm": 0.6241424083709717,
"learning_rate": 8.173652566011338e-05,
"loss": 1.4543,
"num_input_tokens_seen": 92798976,
"step": 354
},
{
"epoch": 0.07984705353126406,
"grad_norm": 0.29428744316101074,
"learning_rate": 8.160146513324254e-05,
"loss": 1.3101,
"num_input_tokens_seen": 93061120,
"step": 355
},
{
"epoch": 0.08007197480881692,
"grad_norm": 0.28312620520591736,
"learning_rate": 8.146601955249188e-05,
"loss": 1.691,
"num_input_tokens_seen": 93323264,
"step": 356
},
{
"epoch": 0.08029689608636977,
"grad_norm": 0.3287985920906067,
"learning_rate": 8.133019056822304e-05,
"loss": 1.4051,
"num_input_tokens_seen": 93585408,
"step": 357
},
{
"epoch": 0.08052181736392262,
"grad_norm": 0.8355076909065247,
"learning_rate": 8.119397983546932e-05,
"loss": 0.8261,
"num_input_tokens_seen": 93847552,
"step": 358
},
{
"epoch": 0.08074673864147548,
"grad_norm": 0.4442685544490814,
"learning_rate": 8.105738901391552e-05,
"loss": 1.9674,
"num_input_tokens_seen": 94109696,
"step": 359
},
{
"epoch": 0.08097165991902834,
"grad_norm": 0.39171215891838074,
"learning_rate": 8.09204197678777e-05,
"loss": 1.4591,
"num_input_tokens_seen": 94371840,
"step": 360
},
{
"epoch": 0.0811965811965812,
"grad_norm": 0.5869056582450867,
"learning_rate": 8.07830737662829e-05,
"loss": 1.6919,
"num_input_tokens_seen": 94633984,
"step": 361
},
{
"epoch": 0.08142150247413406,
"grad_norm": 0.3079814910888672,
"learning_rate": 8.064535268264883e-05,
"loss": 1.2014,
"num_input_tokens_seen": 94896128,
"step": 362
},
{
"epoch": 0.0816464237516869,
"grad_norm": 0.40882888436317444,
"learning_rate": 8.05072581950634e-05,
"loss": 2.0127,
"num_input_tokens_seen": 95158272,
"step": 363
},
{
"epoch": 0.08187134502923976,
"grad_norm": 0.3157702088356018,
"learning_rate": 8.036879198616434e-05,
"loss": 1.5946,
"num_input_tokens_seen": 95420416,
"step": 364
},
{
"epoch": 0.08209626630679262,
"grad_norm": 0.2747892439365387,
"learning_rate": 8.022995574311876e-05,
"loss": 1.3058,
"num_input_tokens_seen": 95682560,
"step": 365
},
{
"epoch": 0.08232118758434548,
"grad_norm": 0.2914641499519348,
"learning_rate": 8.009075115760243e-05,
"loss": 1.3948,
"num_input_tokens_seen": 95944704,
"step": 366
},
{
"epoch": 0.08254610886189834,
"grad_norm": 0.43341168761253357,
"learning_rate": 7.99511799257793e-05,
"loss": 1.923,
"num_input_tokens_seen": 96206848,
"step": 367
},
{
"epoch": 0.0827710301394512,
"grad_norm": 0.3780982792377472,
"learning_rate": 7.98112437482808e-05,
"loss": 1.949,
"num_input_tokens_seen": 96468992,
"step": 368
},
{
"epoch": 0.08299595141700405,
"grad_norm": 0.3152284622192383,
"learning_rate": 7.967094433018508e-05,
"loss": 1.8534,
"num_input_tokens_seen": 96731136,
"step": 369
},
{
"epoch": 0.0832208726945569,
"grad_norm": 0.41052770614624023,
"learning_rate": 7.953028338099627e-05,
"loss": 1.5833,
"num_input_tokens_seen": 96993280,
"step": 370
},
{
"epoch": 0.08344579397210976,
"grad_norm": 0.2482951134443283,
"learning_rate": 7.938926261462366e-05,
"loss": 0.9037,
"num_input_tokens_seen": 97255424,
"step": 371
},
{
"epoch": 0.08367071524966262,
"grad_norm": 0.2243509441614151,
"learning_rate": 7.924788374936078e-05,
"loss": 1.1509,
"num_input_tokens_seen": 97517568,
"step": 372
},
{
"epoch": 0.08389563652721548,
"grad_norm": 0.4592084288597107,
"learning_rate": 7.910614850786448e-05,
"loss": 1.454,
"num_input_tokens_seen": 97779712,
"step": 373
},
{
"epoch": 0.08412055780476833,
"grad_norm": 0.46746399998664856,
"learning_rate": 7.896405861713394e-05,
"loss": 1.883,
"num_input_tokens_seen": 98041856,
"step": 374
},
{
"epoch": 0.08434547908232119,
"grad_norm": 0.32724547386169434,
"learning_rate": 7.882161580848967e-05,
"loss": 1.892,
"num_input_tokens_seen": 98304000,
"step": 375
},
{
"epoch": 0.08457040035987404,
"grad_norm": 0.3034023642539978,
"learning_rate": 7.86788218175523e-05,
"loss": 1.6046,
"num_input_tokens_seen": 98566144,
"step": 376
},
{
"epoch": 0.0847953216374269,
"grad_norm": 0.3898322880268097,
"learning_rate": 7.85356783842216e-05,
"loss": 1.4225,
"num_input_tokens_seen": 98828288,
"step": 377
},
{
"epoch": 0.08502024291497975,
"grad_norm": 0.3080570101737976,
"learning_rate": 7.839218725265506e-05,
"loss": 2.099,
"num_input_tokens_seen": 99090432,
"step": 378
},
{
"epoch": 0.08524516419253261,
"grad_norm": 0.3034183084964752,
"learning_rate": 7.82483501712469e-05,
"loss": 2.0276,
"num_input_tokens_seen": 99352576,
"step": 379
},
{
"epoch": 0.08547008547008547,
"grad_norm": 0.2619307339191437,
"learning_rate": 7.810416889260653e-05,
"loss": 1.1334,
"num_input_tokens_seen": 99614720,
"step": 380
},
{
"epoch": 0.08569500674763833,
"grad_norm": 0.31621718406677246,
"learning_rate": 7.795964517353735e-05,
"loss": 1.0219,
"num_input_tokens_seen": 99876864,
"step": 381
},
{
"epoch": 0.08591992802519119,
"grad_norm": 0.3380356729030609,
"learning_rate": 7.781478077501525e-05,
"loss": 1.203,
"num_input_tokens_seen": 100139008,
"step": 382
},
{
"epoch": 0.08614484930274403,
"grad_norm": 0.3040500581264496,
"learning_rate": 7.766957746216721e-05,
"loss": 1.4343,
"num_input_tokens_seen": 100401152,
"step": 383
},
{
"epoch": 0.08636977058029689,
"grad_norm": 0.2802172899246216,
"learning_rate": 7.752403700424979e-05,
"loss": 1.6222,
"num_input_tokens_seen": 100663296,
"step": 384
},
{
"epoch": 0.08659469185784975,
"grad_norm": 0.3306577503681183,
"learning_rate": 7.737816117462752e-05,
"loss": 1.8542,
"num_input_tokens_seen": 100925440,
"step": 385
},
{
"epoch": 0.08681961313540261,
"grad_norm": 0.30289724469184875,
"learning_rate": 7.723195175075136e-05,
"loss": 1.3011,
"num_input_tokens_seen": 101187584,
"step": 386
},
{
"epoch": 0.08704453441295547,
"grad_norm": 0.2980553209781647,
"learning_rate": 7.7085410514137e-05,
"loss": 1.6361,
"num_input_tokens_seen": 101449728,
"step": 387
},
{
"epoch": 0.08726945569050833,
"grad_norm": 0.21399402618408203,
"learning_rate": 7.693853925034315e-05,
"loss": 1.1644,
"num_input_tokens_seen": 101711872,
"step": 388
},
{
"epoch": 0.08749437696806119,
"grad_norm": 0.43986937403678894,
"learning_rate": 7.679133974894983e-05,
"loss": 1.8904,
"num_input_tokens_seen": 101974016,
"step": 389
},
{
"epoch": 0.08771929824561403,
"grad_norm": 0.31275415420532227,
"learning_rate": 7.66438138035365e-05,
"loss": 2.0859,
"num_input_tokens_seen": 102236160,
"step": 390
},
{
"epoch": 0.08794421952316689,
"grad_norm": 0.2997763454914093,
"learning_rate": 7.649596321166024e-05,
"loss": 1.5338,
"num_input_tokens_seen": 102498304,
"step": 391
},
{
"epoch": 0.08816914080071975,
"grad_norm": 0.2745038866996765,
"learning_rate": 7.634778977483389e-05,
"loss": 1.0397,
"num_input_tokens_seen": 102760448,
"step": 392
},
{
"epoch": 0.0883940620782726,
"grad_norm": 0.33330515027046204,
"learning_rate": 7.619929529850397e-05,
"loss": 1.5318,
"num_input_tokens_seen": 103022592,
"step": 393
},
{
"epoch": 0.08861898335582546,
"grad_norm": 0.3642829656600952,
"learning_rate": 7.605048159202883e-05,
"loss": 1.4536,
"num_input_tokens_seen": 103284736,
"step": 394
},
{
"epoch": 0.08884390463337832,
"grad_norm": 0.44559112191200256,
"learning_rate": 7.590135046865651e-05,
"loss": 1.888,
"num_input_tokens_seen": 103546880,
"step": 395
},
{
"epoch": 0.08906882591093117,
"grad_norm": 0.3070293962955475,
"learning_rate": 7.575190374550272e-05,
"loss": 1.288,
"num_input_tokens_seen": 103809024,
"step": 396
},
{
"epoch": 0.08929374718848403,
"grad_norm": 0.3520009517669678,
"learning_rate": 7.560214324352858e-05,
"loss": 1.3505,
"num_input_tokens_seen": 104071168,
"step": 397
},
{
"epoch": 0.08951866846603689,
"grad_norm": 0.3868756890296936,
"learning_rate": 7.545207078751857e-05,
"loss": 1.9781,
"num_input_tokens_seen": 104333312,
"step": 398
},
{
"epoch": 0.08974358974358974,
"grad_norm": 0.29797691106796265,
"learning_rate": 7.530168820605818e-05,
"loss": 1.5291,
"num_input_tokens_seen": 104595456,
"step": 399
},
{
"epoch": 0.0899685110211426,
"grad_norm": 0.5068181157112122,
"learning_rate": 7.515099733151177e-05,
"loss": 1.6532,
"num_input_tokens_seen": 104857600,
"step": 400
},
{
"epoch": 0.09019343229869546,
"grad_norm": 0.4907291531562805,
"learning_rate": 7.500000000000001e-05,
"loss": 1.9233,
"num_input_tokens_seen": 105119744,
"step": 401
},
{
"epoch": 0.09041835357624832,
"grad_norm": 0.26997601985931396,
"learning_rate": 7.484869805137778e-05,
"loss": 1.3426,
"num_input_tokens_seen": 105381888,
"step": 402
},
{
"epoch": 0.09064327485380116,
"grad_norm": 0.31390297412872314,
"learning_rate": 7.469709332921155e-05,
"loss": 1.9555,
"num_input_tokens_seen": 105644032,
"step": 403
},
{
"epoch": 0.09086819613135402,
"grad_norm": 0.23061801493167877,
"learning_rate": 7.454518768075704e-05,
"loss": 1.3306,
"num_input_tokens_seen": 105906176,
"step": 404
},
{
"epoch": 0.09109311740890688,
"grad_norm": 0.2531485855579376,
"learning_rate": 7.439298295693665e-05,
"loss": 1.2648,
"num_input_tokens_seen": 106168320,
"step": 405
},
{
"epoch": 0.09131803868645974,
"grad_norm": 0.3596000373363495,
"learning_rate": 7.424048101231686e-05,
"loss": 1.5472,
"num_input_tokens_seen": 106430464,
"step": 406
},
{
"epoch": 0.0915429599640126,
"grad_norm": 0.2629176676273346,
"learning_rate": 7.408768370508576e-05,
"loss": 1.351,
"num_input_tokens_seen": 106692608,
"step": 407
},
{
"epoch": 0.09176788124156546,
"grad_norm": 0.3580552637577057,
"learning_rate": 7.393459289703035e-05,
"loss": 1.7573,
"num_input_tokens_seen": 106954752,
"step": 408
},
{
"epoch": 0.0919928025191183,
"grad_norm": 0.3975083827972412,
"learning_rate": 7.378121045351378e-05,
"loss": 1.6161,
"num_input_tokens_seen": 107216896,
"step": 409
},
{
"epoch": 0.09221772379667116,
"grad_norm": 0.28635743260383606,
"learning_rate": 7.362753824345272e-05,
"loss": 1.38,
"num_input_tokens_seen": 107479040,
"step": 410
},
{
"epoch": 0.09244264507422402,
"grad_norm": 0.37943241000175476,
"learning_rate": 7.347357813929454e-05,
"loss": 1.648,
"num_input_tokens_seen": 107741184,
"step": 411
},
{
"epoch": 0.09266756635177688,
"grad_norm": 0.27485769987106323,
"learning_rate": 7.331933201699457e-05,
"loss": 1.5141,
"num_input_tokens_seen": 108003328,
"step": 412
},
{
"epoch": 0.09289248762932974,
"grad_norm": 0.3419206440448761,
"learning_rate": 7.316480175599309e-05,
"loss": 1.2814,
"num_input_tokens_seen": 108265472,
"step": 413
},
{
"epoch": 0.0931174089068826,
"grad_norm": 0.2868017554283142,
"learning_rate": 7.300998923919259e-05,
"loss": 1.7768,
"num_input_tokens_seen": 108527616,
"step": 414
},
{
"epoch": 0.09334233018443545,
"grad_norm": 0.49058419466018677,
"learning_rate": 7.285489635293472e-05,
"loss": 1.9471,
"num_input_tokens_seen": 108789760,
"step": 415
},
{
"epoch": 0.0935672514619883,
"grad_norm": 0.26313817501068115,
"learning_rate": 7.269952498697734e-05,
"loss": 1.2005,
"num_input_tokens_seen": 109051904,
"step": 416
},
{
"epoch": 0.09379217273954116,
"grad_norm": 0.3596467673778534,
"learning_rate": 7.254387703447154e-05,
"loss": 1.6942,
"num_input_tokens_seen": 109314048,
"step": 417
},
{
"epoch": 0.09401709401709402,
"grad_norm": 0.2295425832271576,
"learning_rate": 7.238795439193848e-05,
"loss": 1.0696,
"num_input_tokens_seen": 109576192,
"step": 418
},
{
"epoch": 0.09424201529464687,
"grad_norm": 0.2795259356498718,
"learning_rate": 7.223175895924638e-05,
"loss": 1.602,
"num_input_tokens_seen": 109838336,
"step": 419
},
{
"epoch": 0.09446693657219973,
"grad_norm": 0.30435189604759216,
"learning_rate": 7.207529263958726e-05,
"loss": 1.6747,
"num_input_tokens_seen": 110100480,
"step": 420
},
{
"epoch": 0.09469185784975259,
"grad_norm": 0.33711546659469604,
"learning_rate": 7.191855733945387e-05,
"loss": 1.7159,
"num_input_tokens_seen": 110362624,
"step": 421
},
{
"epoch": 0.09491677912730544,
"grad_norm": 0.276668518781662,
"learning_rate": 7.176155496861638e-05,
"loss": 1.4798,
"num_input_tokens_seen": 110624768,
"step": 422
},
{
"epoch": 0.0951417004048583,
"grad_norm": 0.46163132786750793,
"learning_rate": 7.160428744009912e-05,
"loss": 1.9584,
"num_input_tokens_seen": 110886912,
"step": 423
},
{
"epoch": 0.09536662168241115,
"grad_norm": 0.2237766981124878,
"learning_rate": 7.14467566701573e-05,
"loss": 1.2179,
"num_input_tokens_seen": 111149056,
"step": 424
},
{
"epoch": 0.09559154295996401,
"grad_norm": 0.40620312094688416,
"learning_rate": 7.128896457825364e-05,
"loss": 1.905,
"num_input_tokens_seen": 111411200,
"step": 425
},
{
"epoch": 0.09581646423751687,
"grad_norm": 0.3632362186908722,
"learning_rate": 7.113091308703498e-05,
"loss": 1.6545,
"num_input_tokens_seen": 111673344,
"step": 426
},
{
"epoch": 0.09604138551506973,
"grad_norm": 0.31227433681488037,
"learning_rate": 7.097260412230886e-05,
"loss": 1.7099,
"num_input_tokens_seen": 111935488,
"step": 427
},
{
"epoch": 0.09626630679262259,
"grad_norm": 0.5832061767578125,
"learning_rate": 7.081403961302006e-05,
"loss": 2.1903,
"num_input_tokens_seen": 112197632,
"step": 428
},
{
"epoch": 0.09649122807017543,
"grad_norm": 0.26980289816856384,
"learning_rate": 7.06552214912271e-05,
"loss": 1.3692,
"num_input_tokens_seen": 112459776,
"step": 429
},
{
"epoch": 0.09671614934772829,
"grad_norm": 0.295695960521698,
"learning_rate": 7.049615169207864e-05,
"loss": 1.5653,
"num_input_tokens_seen": 112721920,
"step": 430
},
{
"epoch": 0.09694107062528115,
"grad_norm": 0.34605613350868225,
"learning_rate": 7.033683215379002e-05,
"loss": 1.5236,
"num_input_tokens_seen": 112984064,
"step": 431
},
{
"epoch": 0.09716599190283401,
"grad_norm": 0.274335652589798,
"learning_rate": 7.017726481761951e-05,
"loss": 1.356,
"num_input_tokens_seen": 113246208,
"step": 432
},
{
"epoch": 0.09739091318038687,
"grad_norm": 0.3841763734817505,
"learning_rate": 7.001745162784477e-05,
"loss": 1.2775,
"num_input_tokens_seen": 113508352,
"step": 433
},
{
"epoch": 0.09761583445793973,
"grad_norm": 0.38481906056404114,
"learning_rate": 6.985739453173903e-05,
"loss": 1.2148,
"num_input_tokens_seen": 113770496,
"step": 434
},
{
"epoch": 0.09784075573549257,
"grad_norm": 0.5611907839775085,
"learning_rate": 6.969709547954756e-05,
"loss": 1.7437,
"num_input_tokens_seen": 114032640,
"step": 435
},
{
"epoch": 0.09806567701304543,
"grad_norm": 0.3166041076183319,
"learning_rate": 6.953655642446368e-05,
"loss": 1.1888,
"num_input_tokens_seen": 114294784,
"step": 436
},
{
"epoch": 0.09829059829059829,
"grad_norm": 0.31839755177497864,
"learning_rate": 6.937577932260515e-05,
"loss": 1.4222,
"num_input_tokens_seen": 114556928,
"step": 437
},
{
"epoch": 0.09851551956815115,
"grad_norm": 0.34371787309646606,
"learning_rate": 6.921476613299018e-05,
"loss": 1.4687,
"num_input_tokens_seen": 114819072,
"step": 438
},
{
"epoch": 0.098740440845704,
"grad_norm": 0.23597583174705505,
"learning_rate": 6.905351881751372e-05,
"loss": 0.9579,
"num_input_tokens_seen": 115081216,
"step": 439
},
{
"epoch": 0.09896536212325686,
"grad_norm": 0.35465583205223083,
"learning_rate": 6.889203934092336e-05,
"loss": 1.765,
"num_input_tokens_seen": 115343360,
"step": 440
},
{
"epoch": 0.09919028340080972,
"grad_norm": 0.6744416356086731,
"learning_rate": 6.873032967079561e-05,
"loss": 1.6383,
"num_input_tokens_seen": 115605504,
"step": 441
},
{
"epoch": 0.09941520467836257,
"grad_norm": 0.27794769406318665,
"learning_rate": 6.856839177751176e-05,
"loss": 1.0429,
"num_input_tokens_seen": 115867648,
"step": 442
},
{
"epoch": 0.09964012595591543,
"grad_norm": 0.3875754773616791,
"learning_rate": 6.840622763423391e-05,
"loss": 1.2993,
"num_input_tokens_seen": 116129792,
"step": 443
},
{
"epoch": 0.09986504723346828,
"grad_norm": 0.32801422476768494,
"learning_rate": 6.824383921688098e-05,
"loss": 1.7491,
"num_input_tokens_seen": 116391936,
"step": 444
},
{
"epoch": 0.10008996851102114,
"grad_norm": 0.46113693714141846,
"learning_rate": 6.808122850410461e-05,
"loss": 1.2563,
"num_input_tokens_seen": 116654080,
"step": 445
},
{
"epoch": 0.100314889788574,
"grad_norm": 0.3193335235118866,
"learning_rate": 6.7918397477265e-05,
"loss": 1.5971,
"num_input_tokens_seen": 116916224,
"step": 446
},
{
"epoch": 0.10053981106612686,
"grad_norm": 1.1562225818634033,
"learning_rate": 6.775534812040685e-05,
"loss": 1.1216,
"num_input_tokens_seen": 117178368,
"step": 447
},
{
"epoch": 0.1007647323436797,
"grad_norm": 0.8205109238624573,
"learning_rate": 6.759208242023509e-05,
"loss": 2.271,
"num_input_tokens_seen": 117440512,
"step": 448
},
{
"epoch": 0.10098965362123256,
"grad_norm": 0.23773515224456787,
"learning_rate": 6.742860236609077e-05,
"loss": 1.0239,
"num_input_tokens_seen": 117702656,
"step": 449
},
{
"epoch": 0.10121457489878542,
"grad_norm": 0.4301644265651703,
"learning_rate": 6.726490994992674e-05,
"loss": 1.8923,
"num_input_tokens_seen": 117964800,
"step": 450
},
{
"epoch": 0.10143949617633828,
"grad_norm": 0.4098486602306366,
"learning_rate": 6.710100716628344e-05,
"loss": 1.8184,
"num_input_tokens_seen": 118226944,
"step": 451
},
{
"epoch": 0.10166441745389114,
"grad_norm": 0.4366002678871155,
"learning_rate": 6.693689601226458e-05,
"loss": 1.5744,
"num_input_tokens_seen": 118489088,
"step": 452
},
{
"epoch": 0.101889338731444,
"grad_norm": 0.3160037100315094,
"learning_rate": 6.677257848751277e-05,
"loss": 0.9482,
"num_input_tokens_seen": 118751232,
"step": 453
},
{
"epoch": 0.10211426000899686,
"grad_norm": 0.3118400573730469,
"learning_rate": 6.660805659418516e-05,
"loss": 1.3443,
"num_input_tokens_seen": 119013376,
"step": 454
},
{
"epoch": 0.1023391812865497,
"grad_norm": 0.35094577074050903,
"learning_rate": 6.644333233692916e-05,
"loss": 1.5801,
"num_input_tokens_seen": 119275520,
"step": 455
},
{
"epoch": 0.10256410256410256,
"grad_norm": 1.0213929414749146,
"learning_rate": 6.627840772285784e-05,
"loss": 1.4641,
"num_input_tokens_seen": 119537664,
"step": 456
},
{
"epoch": 0.10278902384165542,
"grad_norm": 0.3663333058357239,
"learning_rate": 6.611328476152557e-05,
"loss": 1.7087,
"num_input_tokens_seen": 119799808,
"step": 457
},
{
"epoch": 0.10301394511920828,
"grad_norm": 0.2802916467189789,
"learning_rate": 6.594796546490351e-05,
"loss": 1.4823,
"num_input_tokens_seen": 120061952,
"step": 458
},
{
"epoch": 0.10323886639676114,
"grad_norm": 0.2548016607761383,
"learning_rate": 6.578245184735513e-05,
"loss": 1.8112,
"num_input_tokens_seen": 120324096,
"step": 459
},
{
"epoch": 0.103463787674314,
"grad_norm": 0.31046029925346375,
"learning_rate": 6.561674592561163e-05,
"loss": 1.293,
"num_input_tokens_seen": 120586240,
"step": 460
},
{
"epoch": 0.10368870895186684,
"grad_norm": 0.23766155540943146,
"learning_rate": 6.545084971874738e-05,
"loss": 1.3626,
"num_input_tokens_seen": 120848384,
"step": 461
},
{
"epoch": 0.1039136302294197,
"grad_norm": 0.7904400825500488,
"learning_rate": 6.528476524815528e-05,
"loss": 1.1202,
"num_input_tokens_seen": 121110528,
"step": 462
},
{
"epoch": 0.10413855150697256,
"grad_norm": 0.34406253695487976,
"learning_rate": 6.511849453752223e-05,
"loss": 1.9536,
"num_input_tokens_seen": 121372672,
"step": 463
},
{
"epoch": 0.10436347278452542,
"grad_norm": 0.3135795295238495,
"learning_rate": 6.495203961280434e-05,
"loss": 1.7353,
"num_input_tokens_seen": 121634816,
"step": 464
},
{
"epoch": 0.10458839406207827,
"grad_norm": 0.29938840866088867,
"learning_rate": 6.478540250220234e-05,
"loss": 1.4572,
"num_input_tokens_seen": 121896960,
"step": 465
},
{
"epoch": 0.10481331533963113,
"grad_norm": 0.35105428099632263,
"learning_rate": 6.461858523613684e-05,
"loss": 1.4649,
"num_input_tokens_seen": 122159104,
"step": 466
},
{
"epoch": 0.10503823661718399,
"grad_norm": 0.4943515956401825,
"learning_rate": 6.445158984722358e-05,
"loss": 1.4881,
"num_input_tokens_seen": 122421248,
"step": 467
},
{
"epoch": 0.10526315789473684,
"grad_norm": 0.23201408982276917,
"learning_rate": 6.428441837024868e-05,
"loss": 0.8936,
"num_input_tokens_seen": 122683392,
"step": 468
},
{
"epoch": 0.1054880791722897,
"grad_norm": 0.28961750864982605,
"learning_rate": 6.411707284214384e-05,
"loss": 1.311,
"num_input_tokens_seen": 122945536,
"step": 469
},
{
"epoch": 0.10571300044984255,
"grad_norm": 0.41510218381881714,
"learning_rate": 6.394955530196147e-05,
"loss": 1.3483,
"num_input_tokens_seen": 123207680,
"step": 470
},
{
"epoch": 0.10593792172739541,
"grad_norm": 0.6346913576126099,
"learning_rate": 6.378186779084995e-05,
"loss": 1.6187,
"num_input_tokens_seen": 123469824,
"step": 471
},
{
"epoch": 0.10616284300494827,
"grad_norm": 0.271835058927536,
"learning_rate": 6.361401235202872e-05,
"loss": 1.2083,
"num_input_tokens_seen": 123731968,
"step": 472
},
{
"epoch": 0.10638776428250113,
"grad_norm": 0.3942575752735138,
"learning_rate": 6.344599103076329e-05,
"loss": 1.8387,
"num_input_tokens_seen": 123994112,
"step": 473
},
{
"epoch": 0.10661268556005399,
"grad_norm": 0.3236991763114929,
"learning_rate": 6.327780587434044e-05,
"loss": 1.1073,
"num_input_tokens_seen": 124256256,
"step": 474
},
{
"epoch": 0.10683760683760683,
"grad_norm": 2.949082612991333,
"learning_rate": 6.310945893204324e-05,
"loss": 1.4841,
"num_input_tokens_seen": 124518400,
"step": 475
},
{
"epoch": 0.10706252811515969,
"grad_norm": 0.35429275035858154,
"learning_rate": 6.294095225512603e-05,
"loss": 1.4407,
"num_input_tokens_seen": 124780544,
"step": 476
},
{
"epoch": 0.10728744939271255,
"grad_norm": 0.28357911109924316,
"learning_rate": 6.277228789678953e-05,
"loss": 1.6392,
"num_input_tokens_seen": 125042688,
"step": 477
},
{
"epoch": 0.10751237067026541,
"grad_norm": 0.2541661560535431,
"learning_rate": 6.26034679121557e-05,
"loss": 1.4364,
"num_input_tokens_seen": 125304832,
"step": 478
},
{
"epoch": 0.10773729194781827,
"grad_norm": 0.29796111583709717,
"learning_rate": 6.243449435824276e-05,
"loss": 1.5294,
"num_input_tokens_seen": 125566976,
"step": 479
},
{
"epoch": 0.10796221322537113,
"grad_norm": 0.3105159401893616,
"learning_rate": 6.226536929394013e-05,
"loss": 1.6884,
"num_input_tokens_seen": 125829120,
"step": 480
},
{
"epoch": 0.10818713450292397,
"grad_norm": 0.3628823459148407,
"learning_rate": 6.209609477998338e-05,
"loss": 1.4418,
"num_input_tokens_seen": 126091264,
"step": 481
},
{
"epoch": 0.10841205578047683,
"grad_norm": 0.26390042901039124,
"learning_rate": 6.192667287892905e-05,
"loss": 1.2891,
"num_input_tokens_seen": 126353408,
"step": 482
},
{
"epoch": 0.10863697705802969,
"grad_norm": 0.8760519623756409,
"learning_rate": 6.17571056551295e-05,
"loss": 1.3306,
"num_input_tokens_seen": 126615552,
"step": 483
},
{
"epoch": 0.10886189833558255,
"grad_norm": 0.5081838369369507,
"learning_rate": 6.158739517470786e-05,
"loss": 1.5474,
"num_input_tokens_seen": 126877696,
"step": 484
},
{
"epoch": 0.1090868196131354,
"grad_norm": 0.3239939510822296,
"learning_rate": 6.141754350553279e-05,
"loss": 1.2702,
"num_input_tokens_seen": 127139840,
"step": 485
},
{
"epoch": 0.10931174089068826,
"grad_norm": 0.3140064477920532,
"learning_rate": 6.124755271719325e-05,
"loss": 1.2157,
"num_input_tokens_seen": 127401984,
"step": 486
},
{
"epoch": 0.10953666216824112,
"grad_norm": 0.5966137051582336,
"learning_rate": 6.107742488097338e-05,
"loss": 1.5058,
"num_input_tokens_seen": 127664128,
"step": 487
},
{
"epoch": 0.10976158344579397,
"grad_norm": 0.24960365891456604,
"learning_rate": 6.090716206982714e-05,
"loss": 1.6896,
"num_input_tokens_seen": 127926272,
"step": 488
},
{
"epoch": 0.10998650472334683,
"grad_norm": 0.22397436201572418,
"learning_rate": 6.073676635835317e-05,
"loss": 0.7462,
"num_input_tokens_seen": 128188416,
"step": 489
},
{
"epoch": 0.11021142600089968,
"grad_norm": 0.304304301738739,
"learning_rate": 6.056623982276944e-05,
"loss": 1.5282,
"num_input_tokens_seen": 128450560,
"step": 490
},
{
"epoch": 0.11043634727845254,
"grad_norm": 0.39173880219459534,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.2825,
"num_input_tokens_seen": 128712704,
"step": 491
},
{
"epoch": 0.1106612685560054,
"grad_norm": 0.48953327536582947,
"learning_rate": 6.0224802592089513e-05,
"loss": 1.6419,
"num_input_tokens_seen": 128974848,
"step": 492
},
{
"epoch": 0.11088618983355826,
"grad_norm": 0.29004979133605957,
"learning_rate": 6.005389605729824e-05,
"loss": 1.4553,
"num_input_tokens_seen": 129236992,
"step": 493
},
{
"epoch": 0.1111111111111111,
"grad_norm": 0.3265511691570282,
"learning_rate": 5.988286701895631e-05,
"loss": 1.0105,
"num_input_tokens_seen": 129499136,
"step": 494
},
{
"epoch": 0.11133603238866396,
"grad_norm": 0.2783220708370209,
"learning_rate": 5.97117175609986e-05,
"loss": 1.4261,
"num_input_tokens_seen": 129761280,
"step": 495
},
{
"epoch": 0.11156095366621682,
"grad_norm": 0.24198879301548004,
"learning_rate": 5.9540449768827246e-05,
"loss": 1.0924,
"num_input_tokens_seen": 130023424,
"step": 496
},
{
"epoch": 0.11178587494376968,
"grad_norm": 0.43721655011177063,
"learning_rate": 5.9369065729286245e-05,
"loss": 1.5056,
"num_input_tokens_seen": 130285568,
"step": 497
},
{
"epoch": 0.11201079622132254,
"grad_norm": 0.23090052604675293,
"learning_rate": 5.9197567530636014e-05,
"loss": 1.3582,
"num_input_tokens_seen": 130547712,
"step": 498
},
{
"epoch": 0.1122357174988754,
"grad_norm": 0.2385416179895401,
"learning_rate": 5.902595726252801e-05,
"loss": 1.0288,
"num_input_tokens_seen": 130809856,
"step": 499
},
{
"epoch": 0.11246063877642826,
"grad_norm": 0.3970996141433716,
"learning_rate": 5.885423701597917e-05,
"loss": 1.6201,
"num_input_tokens_seen": 131072000,
"step": 500
},
{
"epoch": 0.1126855600539811,
"grad_norm": 0.4963282644748688,
"learning_rate": 5.868240888334653e-05,
"loss": 1.6411,
"num_input_tokens_seen": 131334144,
"step": 501
},
{
"epoch": 0.11291048133153396,
"grad_norm": 0.24954336881637573,
"learning_rate": 5.851047495830163e-05,
"loss": 1.082,
"num_input_tokens_seen": 131596288,
"step": 502
},
{
"epoch": 0.11313540260908682,
"grad_norm": 0.26679301261901855,
"learning_rate": 5.833843733580512e-05,
"loss": 1.4837,
"num_input_tokens_seen": 131858432,
"step": 503
},
{
"epoch": 0.11336032388663968,
"grad_norm": 0.2640249729156494,
"learning_rate": 5.816629811208112e-05,
"loss": 1.1814,
"num_input_tokens_seen": 132120576,
"step": 504
},
{
"epoch": 0.11358524516419254,
"grad_norm": 1.2667120695114136,
"learning_rate": 5.799405938459175e-05,
"loss": 1.6477,
"num_input_tokens_seen": 132382720,
"step": 505
},
{
"epoch": 0.1138101664417454,
"grad_norm": 0.25769752264022827,
"learning_rate": 5.782172325201155e-05,
"loss": 1.4024,
"num_input_tokens_seen": 132644864,
"step": 506
},
{
"epoch": 0.11403508771929824,
"grad_norm": 0.34863802790641785,
"learning_rate": 5.764929181420191e-05,
"loss": 1.2889,
"num_input_tokens_seen": 132907008,
"step": 507
},
{
"epoch": 0.1142600089968511,
"grad_norm": 0.2862359881401062,
"learning_rate": 5.747676717218549e-05,
"loss": 1.2924,
"num_input_tokens_seen": 133169152,
"step": 508
},
{
"epoch": 0.11448493027440396,
"grad_norm": 0.2967565357685089,
"learning_rate": 5.730415142812059e-05,
"loss": 1.3798,
"num_input_tokens_seen": 133431296,
"step": 509
},
{
"epoch": 0.11470985155195682,
"grad_norm": 0.26725533604621887,
"learning_rate": 5.713144668527559e-05,
"loss": 1.3552,
"num_input_tokens_seen": 133693440,
"step": 510
},
{
"epoch": 0.11493477282950967,
"grad_norm": 0.3582031726837158,
"learning_rate": 5.695865504800327e-05,
"loss": 1.6537,
"num_input_tokens_seen": 133955584,
"step": 511
},
{
"epoch": 0.11515969410706253,
"grad_norm": 0.2925718426704407,
"learning_rate": 5.6785778621715225e-05,
"loss": 1.1801,
"num_input_tokens_seen": 134217728,
"step": 512
},
{
"epoch": 0.11538461538461539,
"grad_norm": 0.3479791581630707,
"learning_rate": 5.661281951285613e-05,
"loss": 1.3978,
"num_input_tokens_seen": 134479872,
"step": 513
},
{
"epoch": 0.11560953666216824,
"grad_norm": 0.4140552282333374,
"learning_rate": 5.643977982887815e-05,
"loss": 1.566,
"num_input_tokens_seen": 134742016,
"step": 514
},
{
"epoch": 0.1158344579397211,
"grad_norm": 0.3315996825695038,
"learning_rate": 5.6266661678215216e-05,
"loss": 1.6363,
"num_input_tokens_seen": 135004160,
"step": 515
},
{
"epoch": 0.11605937921727395,
"grad_norm": 0.3189332187175751,
"learning_rate": 5.6093467170257374e-05,
"loss": 1.475,
"num_input_tokens_seen": 135266304,
"step": 516
},
{
"epoch": 0.11628430049482681,
"grad_norm": 0.28339698910713196,
"learning_rate": 5.5920198415325064e-05,
"loss": 1.1303,
"num_input_tokens_seen": 135528448,
"step": 517
},
{
"epoch": 0.11650922177237967,
"grad_norm": 0.22190435230731964,
"learning_rate": 5.574685752464334e-05,
"loss": 1.3556,
"num_input_tokens_seen": 135790592,
"step": 518
},
{
"epoch": 0.11673414304993253,
"grad_norm": 0.3237098455429077,
"learning_rate": 5.557344661031627e-05,
"loss": 1.9832,
"num_input_tokens_seen": 136052736,
"step": 519
},
{
"epoch": 0.11695906432748537,
"grad_norm": 0.3498755097389221,
"learning_rate": 5.539996778530115e-05,
"loss": 1.3795,
"num_input_tokens_seen": 136314880,
"step": 520
},
{
"epoch": 0.11718398560503823,
"grad_norm": 0.2598389983177185,
"learning_rate": 5.522642316338268e-05,
"loss": 1.3192,
"num_input_tokens_seen": 136577024,
"step": 521
},
{
"epoch": 0.11740890688259109,
"grad_norm": 0.21834932267665863,
"learning_rate": 5.5052814859147315e-05,
"loss": 1.005,
"num_input_tokens_seen": 136839168,
"step": 522
},
{
"epoch": 0.11763382816014395,
"grad_norm": 0.4254181981086731,
"learning_rate": 5.487914498795747e-05,
"loss": 1.6294,
"num_input_tokens_seen": 137101312,
"step": 523
},
{
"epoch": 0.11785874943769681,
"grad_norm": 0.41052109003067017,
"learning_rate": 5.470541566592573e-05,
"loss": 1.5199,
"num_input_tokens_seen": 137363456,
"step": 524
},
{
"epoch": 0.11808367071524967,
"grad_norm": 0.21535776555538177,
"learning_rate": 5.453162900988902e-05,
"loss": 1.4093,
"num_input_tokens_seen": 137625600,
"step": 525
},
{
"epoch": 0.11830859199280253,
"grad_norm": 0.2855823040008545,
"learning_rate": 5.435778713738292e-05,
"loss": 1.7458,
"num_input_tokens_seen": 137887744,
"step": 526
},
{
"epoch": 0.11853351327035537,
"grad_norm": 0.4613701403141022,
"learning_rate": 5.418389216661579e-05,
"loss": 1.6014,
"num_input_tokens_seen": 138149888,
"step": 527
},
{
"epoch": 0.11875843454790823,
"grad_norm": 0.290487676858902,
"learning_rate": 5.4009946216442944e-05,
"loss": 1.4064,
"num_input_tokens_seen": 138412032,
"step": 528
},
{
"epoch": 0.11898335582546109,
"grad_norm": 0.3731476664543152,
"learning_rate": 5.383595140634093e-05,
"loss": 1.6748,
"num_input_tokens_seen": 138674176,
"step": 529
},
{
"epoch": 0.11920827710301395,
"grad_norm": 0.26862120628356934,
"learning_rate": 5.366190985638159e-05,
"loss": 1.9398,
"num_input_tokens_seen": 138936320,
"step": 530
},
{
"epoch": 0.1194331983805668,
"grad_norm": 0.34171062707901,
"learning_rate": 5.348782368720626e-05,
"loss": 1.841,
"num_input_tokens_seen": 139198464,
"step": 531
},
{
"epoch": 0.11965811965811966,
"grad_norm": 0.352012038230896,
"learning_rate": 5.3313695020000024e-05,
"loss": 1.7402,
"num_input_tokens_seen": 139460608,
"step": 532
},
{
"epoch": 0.11988304093567251,
"grad_norm": 0.2644294202327728,
"learning_rate": 5.313952597646568e-05,
"loss": 1.4092,
"num_input_tokens_seen": 139722752,
"step": 533
},
{
"epoch": 0.12010796221322537,
"grad_norm": 0.4258435368537903,
"learning_rate": 5.296531867879809e-05,
"loss": 1.6543,
"num_input_tokens_seen": 139984896,
"step": 534
},
{
"epoch": 0.12033288349077823,
"grad_norm": 0.26844051480293274,
"learning_rate": 5.279107524965819e-05,
"loss": 0.9761,
"num_input_tokens_seen": 140247040,
"step": 535
},
{
"epoch": 0.12055780476833108,
"grad_norm": 0.31205376982688904,
"learning_rate": 5.26167978121472e-05,
"loss": 2.008,
"num_input_tokens_seen": 140509184,
"step": 536
},
{
"epoch": 0.12078272604588394,
"grad_norm": 0.6046432256698608,
"learning_rate": 5.244248848978067e-05,
"loss": 1.9272,
"num_input_tokens_seen": 140771328,
"step": 537
},
{
"epoch": 0.1210076473234368,
"grad_norm": 0.33107465505599976,
"learning_rate": 5.226814940646269e-05,
"loss": 1.5775,
"num_input_tokens_seen": 141033472,
"step": 538
},
{
"epoch": 0.12123256860098966,
"grad_norm": 0.3236624300479889,
"learning_rate": 5.209378268645998e-05,
"loss": 1.3686,
"num_input_tokens_seen": 141295616,
"step": 539
},
{
"epoch": 0.1214574898785425,
"grad_norm": 0.31758353114128113,
"learning_rate": 5.191939045437601e-05,
"loss": 1.69,
"num_input_tokens_seen": 141557760,
"step": 540
},
{
"epoch": 0.12168241115609536,
"grad_norm": 0.2803833484649658,
"learning_rate": 5.174497483512506e-05,
"loss": 1.334,
"num_input_tokens_seen": 141819904,
"step": 541
},
{
"epoch": 0.12190733243364822,
"grad_norm": 0.4177348017692566,
"learning_rate": 5.157053795390642e-05,
"loss": 1.548,
"num_input_tokens_seen": 142082048,
"step": 542
},
{
"epoch": 0.12213225371120108,
"grad_norm": 0.3638695180416107,
"learning_rate": 5.139608193617845e-05,
"loss": 1.4954,
"num_input_tokens_seen": 142344192,
"step": 543
},
{
"epoch": 0.12235717498875394,
"grad_norm": 0.3036668598651886,
"learning_rate": 5.1221608907632665e-05,
"loss": 1.156,
"num_input_tokens_seen": 142606336,
"step": 544
},
{
"epoch": 0.1225820962663068,
"grad_norm": 0.2818496525287628,
"learning_rate": 5.104712099416785e-05,
"loss": 1.4805,
"num_input_tokens_seen": 142868480,
"step": 545
},
{
"epoch": 0.12280701754385964,
"grad_norm": 0.33742740750312805,
"learning_rate": 5.0872620321864185e-05,
"loss": 1.3688,
"num_input_tokens_seen": 143130624,
"step": 546
},
{
"epoch": 0.1230319388214125,
"grad_norm": 0.3776567578315735,
"learning_rate": 5.0698109016957274e-05,
"loss": 1.1657,
"num_input_tokens_seen": 143392768,
"step": 547
},
{
"epoch": 0.12325686009896536,
"grad_norm": 0.24142980575561523,
"learning_rate": 5.052358920581229e-05,
"loss": 1.3581,
"num_input_tokens_seen": 143654912,
"step": 548
},
{
"epoch": 0.12348178137651822,
"grad_norm": 0.34365004301071167,
"learning_rate": 5.034906301489808e-05,
"loss": 1.4533,
"num_input_tokens_seen": 143917056,
"step": 549
},
{
"epoch": 0.12370670265407108,
"grad_norm": 0.2355596125125885,
"learning_rate": 5.017453257076119e-05,
"loss": 1.2087,
"num_input_tokens_seen": 144179200,
"step": 550
},
{
"epoch": 0.12393162393162394,
"grad_norm": 0.2048463225364685,
"learning_rate": 5e-05,
"loss": 1.2536,
"num_input_tokens_seen": 144441344,
"step": 551
},
{
"epoch": 0.1241565452091768,
"grad_norm": 0.3444722890853882,
"learning_rate": 4.9825467429238834e-05,
"loss": 1.3632,
"num_input_tokens_seen": 144703488,
"step": 552
},
{
"epoch": 0.12438146648672964,
"grad_norm": 0.3106246292591095,
"learning_rate": 4.965093698510193e-05,
"loss": 1.3921,
"num_input_tokens_seen": 144965632,
"step": 553
},
{
"epoch": 0.1246063877642825,
"grad_norm": 0.2710109055042267,
"learning_rate": 4.947641079418773e-05,
"loss": 1.2625,
"num_input_tokens_seen": 145227776,
"step": 554
},
{
"epoch": 0.12483130904183536,
"grad_norm": 0.3017147183418274,
"learning_rate": 4.9301890983042744e-05,
"loss": 1.1092,
"num_input_tokens_seen": 145489920,
"step": 555
},
{
"epoch": 0.1250562303193882,
"grad_norm": 0.6395849585533142,
"learning_rate": 4.912737967813583e-05,
"loss": 1.3323,
"num_input_tokens_seen": 145752064,
"step": 556
},
{
"epoch": 0.12528115159694106,
"grad_norm": 0.5051564574241638,
"learning_rate": 4.895287900583216e-05,
"loss": 1.5473,
"num_input_tokens_seen": 146014208,
"step": 557
},
{
"epoch": 0.12550607287449392,
"grad_norm": 0.3306078016757965,
"learning_rate": 4.8778391092367346e-05,
"loss": 1.532,
"num_input_tokens_seen": 146276352,
"step": 558
},
{
"epoch": 0.12573099415204678,
"grad_norm": 0.31714704632759094,
"learning_rate": 4.860391806382157e-05,
"loss": 1.5618,
"num_input_tokens_seen": 146538496,
"step": 559
},
{
"epoch": 0.12595591542959964,
"grad_norm": 0.28177952766418457,
"learning_rate": 4.8429462046093585e-05,
"loss": 1.8316,
"num_input_tokens_seen": 146800640,
"step": 560
},
{
"epoch": 0.1261808367071525,
"grad_norm": 0.36294594407081604,
"learning_rate": 4.825502516487497e-05,
"loss": 1.1537,
"num_input_tokens_seen": 147062784,
"step": 561
},
{
"epoch": 0.12640575798470535,
"grad_norm": 0.3346196115016937,
"learning_rate": 4.8080609545624004e-05,
"loss": 1.2673,
"num_input_tokens_seen": 147324928,
"step": 562
},
{
"epoch": 0.1266306792622582,
"grad_norm": 0.236924409866333,
"learning_rate": 4.790621731354003e-05,
"loss": 1.114,
"num_input_tokens_seen": 147587072,
"step": 563
},
{
"epoch": 0.12685560053981107,
"grad_norm": 0.30414605140686035,
"learning_rate": 4.773185059353732e-05,
"loss": 1.4022,
"num_input_tokens_seen": 147849216,
"step": 564
},
{
"epoch": 0.12708052181736393,
"grad_norm": 0.27126508951187134,
"learning_rate": 4.755751151021934e-05,
"loss": 1.2234,
"num_input_tokens_seen": 148111360,
"step": 565
},
{
"epoch": 0.1273054430949168,
"grad_norm": 0.27040398120880127,
"learning_rate": 4.738320218785281e-05,
"loss": 1.4393,
"num_input_tokens_seen": 148373504,
"step": 566
},
{
"epoch": 0.12753036437246965,
"grad_norm": 0.3908785879611969,
"learning_rate": 4.720892475034181e-05,
"loss": 1.652,
"num_input_tokens_seen": 148635648,
"step": 567
},
{
"epoch": 0.1277552856500225,
"grad_norm": 0.49319472908973694,
"learning_rate": 4.703468132120193e-05,
"loss": 1.7887,
"num_input_tokens_seen": 148897792,
"step": 568
},
{
"epoch": 0.12798020692757534,
"grad_norm": 0.22037123143672943,
"learning_rate": 4.6860474023534335e-05,
"loss": 1.0219,
"num_input_tokens_seen": 149159936,
"step": 569
},
{
"epoch": 0.1282051282051282,
"grad_norm": 0.28868696093559265,
"learning_rate": 4.668630498000001e-05,
"loss": 1.4268,
"num_input_tokens_seen": 149422080,
"step": 570
},
{
"epoch": 0.12843004948268105,
"grad_norm": 0.3060884177684784,
"learning_rate": 4.6512176312793736e-05,
"loss": 1.7064,
"num_input_tokens_seen": 149684224,
"step": 571
},
{
"epoch": 0.1286549707602339,
"grad_norm": 0.4254710376262665,
"learning_rate": 4.633809014361843e-05,
"loss": 1.5773,
"num_input_tokens_seen": 149946368,
"step": 572
},
{
"epoch": 0.12887989203778677,
"grad_norm": 0.30533093214035034,
"learning_rate": 4.616404859365907e-05,
"loss": 1.2379,
"num_input_tokens_seen": 150208512,
"step": 573
},
{
"epoch": 0.12910481331533963,
"grad_norm": 0.2888457477092743,
"learning_rate": 4.599005378355706e-05,
"loss": 1.2059,
"num_input_tokens_seen": 150470656,
"step": 574
},
{
"epoch": 0.1293297345928925,
"grad_norm": 0.2969624400138855,
"learning_rate": 4.5816107833384234e-05,
"loss": 1.5693,
"num_input_tokens_seen": 150732800,
"step": 575
},
{
"epoch": 0.12955465587044535,
"grad_norm": 0.3754268288612366,
"learning_rate": 4.564221286261709e-05,
"loss": 1.4652,
"num_input_tokens_seen": 150994944,
"step": 576
},
{
"epoch": 0.1297795771479982,
"grad_norm": 0.35749363899230957,
"learning_rate": 4.5468370990111006e-05,
"loss": 1.4626,
"num_input_tokens_seen": 151257088,
"step": 577
},
{
"epoch": 0.13000449842555106,
"grad_norm": 0.2594490647315979,
"learning_rate": 4.529458433407429e-05,
"loss": 1.643,
"num_input_tokens_seen": 151519232,
"step": 578
},
{
"epoch": 0.13022941970310392,
"grad_norm": 0.3177386522293091,
"learning_rate": 4.512085501204253e-05,
"loss": 1.86,
"num_input_tokens_seen": 151781376,
"step": 579
},
{
"epoch": 0.13045434098065678,
"grad_norm": 0.3247101902961731,
"learning_rate": 4.494718514085268e-05,
"loss": 1.7022,
"num_input_tokens_seen": 152043520,
"step": 580
},
{
"epoch": 0.13067926225820964,
"grad_norm": 0.2668783664703369,
"learning_rate": 4.477357683661734e-05,
"loss": 0.9691,
"num_input_tokens_seen": 152305664,
"step": 581
},
{
"epoch": 0.13090418353576247,
"grad_norm": 0.30591410398483276,
"learning_rate": 4.460003221469886e-05,
"loss": 1.7169,
"num_input_tokens_seen": 152567808,
"step": 582
},
{
"epoch": 0.13112910481331533,
"grad_norm": 0.22834749519824982,
"learning_rate": 4.442655338968373e-05,
"loss": 1.5456,
"num_input_tokens_seen": 152829952,
"step": 583
},
{
"epoch": 0.1313540260908682,
"grad_norm": 0.24803908169269562,
"learning_rate": 4.425314247535668e-05,
"loss": 1.4185,
"num_input_tokens_seen": 153092096,
"step": 584
},
{
"epoch": 0.13157894736842105,
"grad_norm": 0.25347429513931274,
"learning_rate": 4.407980158467495e-05,
"loss": 1.5213,
"num_input_tokens_seen": 153354240,
"step": 585
},
{
"epoch": 0.1318038686459739,
"grad_norm": 0.4988107979297638,
"learning_rate": 4.390653282974264e-05,
"loss": 1.4135,
"num_input_tokens_seen": 153616384,
"step": 586
},
{
"epoch": 0.13202878992352676,
"grad_norm": 0.27117958664894104,
"learning_rate": 4.373333832178478e-05,
"loss": 1.5608,
"num_input_tokens_seen": 153878528,
"step": 587
},
{
"epoch": 0.13225371120107962,
"grad_norm": 0.2566024363040924,
"learning_rate": 4.356022017112187e-05,
"loss": 1.3246,
"num_input_tokens_seen": 154140672,
"step": 588
},
{
"epoch": 0.13247863247863248,
"grad_norm": 0.4258366525173187,
"learning_rate": 4.3387180487143876e-05,
"loss": 1.5825,
"num_input_tokens_seen": 154402816,
"step": 589
},
{
"epoch": 0.13270355375618534,
"grad_norm": 0.3168386220932007,
"learning_rate": 4.321422137828479e-05,
"loss": 1.4613,
"num_input_tokens_seen": 154664960,
"step": 590
},
{
"epoch": 0.1329284750337382,
"grad_norm": 0.34318843483924866,
"learning_rate": 4.3041344951996746e-05,
"loss": 1.1464,
"num_input_tokens_seen": 154927104,
"step": 591
},
{
"epoch": 0.13315339631129106,
"grad_norm": 0.321370005607605,
"learning_rate": 4.2868553314724425e-05,
"loss": 1.8339,
"num_input_tokens_seen": 155189248,
"step": 592
},
{
"epoch": 0.13337831758884391,
"grad_norm": 0.25251543521881104,
"learning_rate": 4.269584857187943e-05,
"loss": 1.0068,
"num_input_tokens_seen": 155451392,
"step": 593
},
{
"epoch": 0.13360323886639677,
"grad_norm": 0.32410669326782227,
"learning_rate": 4.252323282781453e-05,
"loss": 1.145,
"num_input_tokens_seen": 155713536,
"step": 594
},
{
"epoch": 0.1338281601439496,
"grad_norm": 0.26194649934768677,
"learning_rate": 4.23507081857981e-05,
"loss": 1.398,
"num_input_tokens_seen": 155975680,
"step": 595
},
{
"epoch": 0.13405308142150246,
"grad_norm": 0.40847936272621155,
"learning_rate": 4.2178276747988446e-05,
"loss": 1.723,
"num_input_tokens_seen": 156237824,
"step": 596
},
{
"epoch": 0.13427800269905532,
"grad_norm": 0.27586326003074646,
"learning_rate": 4.2005940615408264e-05,
"loss": 1.1967,
"num_input_tokens_seen": 156499968,
"step": 597
},
{
"epoch": 0.13450292397660818,
"grad_norm": 0.2727280557155609,
"learning_rate": 4.1833701887918904e-05,
"loss": 1.429,
"num_input_tokens_seen": 156762112,
"step": 598
},
{
"epoch": 0.13472784525416104,
"grad_norm": 0.3165155351161957,
"learning_rate": 4.166156266419489e-05,
"loss": 1.4949,
"num_input_tokens_seen": 157024256,
"step": 599
},
{
"epoch": 0.1349527665317139,
"grad_norm": 0.3037717342376709,
"learning_rate": 4.1489525041698387e-05,
"loss": 1.777,
"num_input_tokens_seen": 157286400,
"step": 600
},
{
"epoch": 0.13517768780926676,
"grad_norm": 0.26216423511505127,
"learning_rate": 4.131759111665349e-05,
"loss": 1.3849,
"num_input_tokens_seen": 157548544,
"step": 601
},
{
"epoch": 0.13540260908681961,
"grad_norm": 0.4158977270126343,
"learning_rate": 4.114576298402084e-05,
"loss": 1.1767,
"num_input_tokens_seen": 157810688,
"step": 602
},
{
"epoch": 0.13562753036437247,
"grad_norm": 0.23499810695648193,
"learning_rate": 4.0974042737472006e-05,
"loss": 1.1516,
"num_input_tokens_seen": 158072832,
"step": 603
},
{
"epoch": 0.13585245164192533,
"grad_norm": 0.30113333463668823,
"learning_rate": 4.080243246936399e-05,
"loss": 1.5608,
"num_input_tokens_seen": 158334976,
"step": 604
},
{
"epoch": 0.1360773729194782,
"grad_norm": 0.22452431917190552,
"learning_rate": 4.063093427071376e-05,
"loss": 1.2839,
"num_input_tokens_seen": 158597120,
"step": 605
},
{
"epoch": 0.13630229419703105,
"grad_norm": 0.44184988737106323,
"learning_rate": 4.045955023117276e-05,
"loss": 1.3822,
"num_input_tokens_seen": 158859264,
"step": 606
},
{
"epoch": 0.1365272154745839,
"grad_norm": 0.45626702904701233,
"learning_rate": 4.028828243900141e-05,
"loss": 1.4369,
"num_input_tokens_seen": 159121408,
"step": 607
},
{
"epoch": 0.13675213675213677,
"grad_norm": 0.2623542547225952,
"learning_rate": 4.0117132981043693e-05,
"loss": 1.2356,
"num_input_tokens_seen": 159383552,
"step": 608
},
{
"epoch": 0.1369770580296896,
"grad_norm": 0.2714481055736542,
"learning_rate": 3.9946103942701777e-05,
"loss": 1.4336,
"num_input_tokens_seen": 159645696,
"step": 609
},
{
"epoch": 0.13720197930724246,
"grad_norm": 0.4344947934150696,
"learning_rate": 3.9775197407910485e-05,
"loss": 1.3214,
"num_input_tokens_seen": 159907840,
"step": 610
},
{
"epoch": 0.13742690058479531,
"grad_norm": 0.3765691816806793,
"learning_rate": 3.960441545911204e-05,
"loss": 1.3965,
"num_input_tokens_seen": 160169984,
"step": 611
},
{
"epoch": 0.13765182186234817,
"grad_norm": 0.8579828143119812,
"learning_rate": 3.943376017723057e-05,
"loss": 1.4656,
"num_input_tokens_seen": 160432128,
"step": 612
},
{
"epoch": 0.13787674313990103,
"grad_norm": 0.2760258913040161,
"learning_rate": 3.926323364164684e-05,
"loss": 1.7707,
"num_input_tokens_seen": 160694272,
"step": 613
},
{
"epoch": 0.1381016644174539,
"grad_norm": 0.28735071420669556,
"learning_rate": 3.9092837930172884e-05,
"loss": 1.3462,
"num_input_tokens_seen": 160956416,
"step": 614
},
{
"epoch": 0.13832658569500675,
"grad_norm": 0.26882848143577576,
"learning_rate": 3.892257511902664e-05,
"loss": 1.3798,
"num_input_tokens_seen": 161218560,
"step": 615
},
{
"epoch": 0.1385515069725596,
"grad_norm": 0.7233993411064148,
"learning_rate": 3.875244728280676e-05,
"loss": 1.0232,
"num_input_tokens_seen": 161480704,
"step": 616
},
{
"epoch": 0.13877642825011247,
"grad_norm": 0.3466539978981018,
"learning_rate": 3.858245649446721e-05,
"loss": 1.3842,
"num_input_tokens_seen": 161742848,
"step": 617
},
{
"epoch": 0.13900134952766532,
"grad_norm": 0.24579237401485443,
"learning_rate": 3.841260482529214e-05,
"loss": 1.1613,
"num_input_tokens_seen": 162004992,
"step": 618
},
{
"epoch": 0.13922627080521818,
"grad_norm": 0.2630798816680908,
"learning_rate": 3.82428943448705e-05,
"loss": 1.3372,
"num_input_tokens_seen": 162267136,
"step": 619
},
{
"epoch": 0.13945119208277104,
"grad_norm": 0.27555617690086365,
"learning_rate": 3.807332712107097e-05,
"loss": 0.8722,
"num_input_tokens_seen": 162529280,
"step": 620
},
{
"epoch": 0.1396761133603239,
"grad_norm": 0.8287885785102844,
"learning_rate": 3.790390522001662e-05,
"loss": 1.4182,
"num_input_tokens_seen": 162791424,
"step": 621
},
{
"epoch": 0.13990103463787673,
"grad_norm": 0.28893017768859863,
"learning_rate": 3.773463070605987e-05,
"loss": 1.267,
"num_input_tokens_seen": 163053568,
"step": 622
},
{
"epoch": 0.1401259559154296,
"grad_norm": 0.28053274750709534,
"learning_rate": 3.756550564175727e-05,
"loss": 1.3201,
"num_input_tokens_seen": 163315712,
"step": 623
},
{
"epoch": 0.14035087719298245,
"grad_norm": 0.25568142533302307,
"learning_rate": 3.739653208784432e-05,
"loss": 1.3894,
"num_input_tokens_seen": 163577856,
"step": 624
},
{
"epoch": 0.1405757984705353,
"grad_norm": 0.3981480896472931,
"learning_rate": 3.7227712103210486e-05,
"loss": 1.3398,
"num_input_tokens_seen": 163840000,
"step": 625
},
{
"epoch": 0.14080071974808817,
"grad_norm": 0.31518107652664185,
"learning_rate": 3.705904774487396e-05,
"loss": 0.8814,
"num_input_tokens_seen": 164102144,
"step": 626
},
{
"epoch": 0.14102564102564102,
"grad_norm": 1.6972969770431519,
"learning_rate": 3.6890541067956776e-05,
"loss": 1.6809,
"num_input_tokens_seen": 164364288,
"step": 627
},
{
"epoch": 0.14125056230319388,
"grad_norm": 0.2689877152442932,
"learning_rate": 3.6722194125659556e-05,
"loss": 1.412,
"num_input_tokens_seen": 164626432,
"step": 628
},
{
"epoch": 0.14147548358074674,
"grad_norm": 0.22733396291732788,
"learning_rate": 3.655400896923672e-05,
"loss": 1.2612,
"num_input_tokens_seen": 164888576,
"step": 629
},
{
"epoch": 0.1417004048582996,
"grad_norm": 0.206431582570076,
"learning_rate": 3.6385987647971285e-05,
"loss": 1.1172,
"num_input_tokens_seen": 165150720,
"step": 630
},
{
"epoch": 0.14192532613585246,
"grad_norm": 0.42070865631103516,
"learning_rate": 3.6218132209150045e-05,
"loss": 1.642,
"num_input_tokens_seen": 165412864,
"step": 631
},
{
"epoch": 0.14215024741340532,
"grad_norm": 0.3252650499343872,
"learning_rate": 3.605044469803854e-05,
"loss": 1.6878,
"num_input_tokens_seen": 165675008,
"step": 632
},
{
"epoch": 0.14237516869095818,
"grad_norm": 0.24793250858783722,
"learning_rate": 3.588292715785617e-05,
"loss": 1.5085,
"num_input_tokens_seen": 165937152,
"step": 633
},
{
"epoch": 0.14260008996851103,
"grad_norm": 0.2541254162788391,
"learning_rate": 3.5715581629751326e-05,
"loss": 1.0216,
"num_input_tokens_seen": 166199296,
"step": 634
},
{
"epoch": 0.14282501124606387,
"grad_norm": 0.41134512424468994,
"learning_rate": 3.554841015277641e-05,
"loss": 1.6166,
"num_input_tokens_seen": 166461440,
"step": 635
},
{
"epoch": 0.14304993252361672,
"grad_norm": 0.311408668756485,
"learning_rate": 3.5381414763863166e-05,
"loss": 1.1436,
"num_input_tokens_seen": 166723584,
"step": 636
},
{
"epoch": 0.14327485380116958,
"grad_norm": 0.29306137561798096,
"learning_rate": 3.5214597497797684e-05,
"loss": 1.8737,
"num_input_tokens_seen": 166985728,
"step": 637
},
{
"epoch": 0.14349977507872244,
"grad_norm": 0.6098542213439941,
"learning_rate": 3.504796038719567e-05,
"loss": 1.6415,
"num_input_tokens_seen": 167247872,
"step": 638
},
{
"epoch": 0.1437246963562753,
"grad_norm": 0.26231542229652405,
"learning_rate": 3.488150546247778e-05,
"loss": 1.5265,
"num_input_tokens_seen": 167510016,
"step": 639
},
{
"epoch": 0.14394961763382816,
"grad_norm": 0.3009147346019745,
"learning_rate": 3.471523475184472e-05,
"loss": 1.3886,
"num_input_tokens_seen": 167772160,
"step": 640
},
{
"epoch": 0.14417453891138102,
"grad_norm": 0.31200405955314636,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.5625,
"num_input_tokens_seen": 168034304,
"step": 641
},
{
"epoch": 0.14439946018893388,
"grad_norm": 0.3186989724636078,
"learning_rate": 3.438325407438837e-05,
"loss": 1.2634,
"num_input_tokens_seen": 168296448,
"step": 642
},
{
"epoch": 0.14462438146648673,
"grad_norm": 0.28268516063690186,
"learning_rate": 3.4217548152644885e-05,
"loss": 1.0662,
"num_input_tokens_seen": 168558592,
"step": 643
},
{
"epoch": 0.1448493027440396,
"grad_norm": 0.3295919597148895,
"learning_rate": 3.40520345350965e-05,
"loss": 1.787,
"num_input_tokens_seen": 168820736,
"step": 644
},
{
"epoch": 0.14507422402159245,
"grad_norm": 0.47454962134361267,
"learning_rate": 3.388671523847445e-05,
"loss": 1.6098,
"num_input_tokens_seen": 169082880,
"step": 645
},
{
"epoch": 0.1452991452991453,
"grad_norm": 0.37780699133872986,
"learning_rate": 3.372159227714218e-05,
"loss": 1.5719,
"num_input_tokens_seen": 169345024,
"step": 646
},
{
"epoch": 0.14552406657669817,
"grad_norm": 0.39475154876708984,
"learning_rate": 3.355666766307084e-05,
"loss": 2.0794,
"num_input_tokens_seen": 169607168,
"step": 647
},
{
"epoch": 0.145748987854251,
"grad_norm": 0.5949121713638306,
"learning_rate": 3.339194340581485e-05,
"loss": 1.1768,
"num_input_tokens_seen": 169869312,
"step": 648
},
{
"epoch": 0.14597390913180386,
"grad_norm": 0.33387547731399536,
"learning_rate": 3.322742151248725e-05,
"loss": 1.5277,
"num_input_tokens_seen": 170131456,
"step": 649
},
{
"epoch": 0.14619883040935672,
"grad_norm": 0.2769029140472412,
"learning_rate": 3.3063103987735433e-05,
"loss": 1.5336,
"num_input_tokens_seen": 170393600,
"step": 650
},
{
"epoch": 0.14642375168690958,
"grad_norm": 0.2560354471206665,
"learning_rate": 3.289899283371657e-05,
"loss": 1.1709,
"num_input_tokens_seen": 170655744,
"step": 651
},
{
"epoch": 0.14664867296446243,
"grad_norm": 0.4691326916217804,
"learning_rate": 3.273509005007327e-05,
"loss": 1.4694,
"num_input_tokens_seen": 170917888,
"step": 652
},
{
"epoch": 0.1468735942420153,
"grad_norm": 0.2512299716472626,
"learning_rate": 3.257139763390925e-05,
"loss": 1.4209,
"num_input_tokens_seen": 171180032,
"step": 653
},
{
"epoch": 0.14709851551956815,
"grad_norm": 0.36664241552352905,
"learning_rate": 3.240791757976491e-05,
"loss": 1.6359,
"num_input_tokens_seen": 171442176,
"step": 654
},
{
"epoch": 0.147323436797121,
"grad_norm": 1.0399090051651,
"learning_rate": 3.224465187959316e-05,
"loss": 1.8301,
"num_input_tokens_seen": 171704320,
"step": 655
},
{
"epoch": 0.14754835807467387,
"grad_norm": 0.38111740350723267,
"learning_rate": 3.2081602522734986e-05,
"loss": 2.0391,
"num_input_tokens_seen": 171966464,
"step": 656
},
{
"epoch": 0.14777327935222673,
"grad_norm": 0.2596481740474701,
"learning_rate": 3.1918771495895396e-05,
"loss": 1.0399,
"num_input_tokens_seen": 172228608,
"step": 657
},
{
"epoch": 0.14799820062977959,
"grad_norm": 0.24174773693084717,
"learning_rate": 3.1756160783119016e-05,
"loss": 1.0012,
"num_input_tokens_seen": 172490752,
"step": 658
},
{
"epoch": 0.14822312190733244,
"grad_norm": 0.39571988582611084,
"learning_rate": 3.1593772365766105e-05,
"loss": 1.9274,
"num_input_tokens_seen": 172752896,
"step": 659
},
{
"epoch": 0.1484480431848853,
"grad_norm": 0.2537073493003845,
"learning_rate": 3.1431608222488275e-05,
"loss": 1.1829,
"num_input_tokens_seen": 173015040,
"step": 660
},
{
"epoch": 0.14867296446243813,
"grad_norm": 0.30034080147743225,
"learning_rate": 3.12696703292044e-05,
"loss": 1.5033,
"num_input_tokens_seen": 173277184,
"step": 661
},
{
"epoch": 0.148897885739991,
"grad_norm": 0.46739694476127625,
"learning_rate": 3.110796065907665e-05,
"loss": 2.1391,
"num_input_tokens_seen": 173539328,
"step": 662
},
{
"epoch": 0.14912280701754385,
"grad_norm": 0.2857431471347809,
"learning_rate": 3.09464811824863e-05,
"loss": 1.794,
"num_input_tokens_seen": 173801472,
"step": 663
},
{
"epoch": 0.1493477282950967,
"grad_norm": 0.2435784935951233,
"learning_rate": 3.078523386700982e-05,
"loss": 1.3756,
"num_input_tokens_seen": 174063616,
"step": 664
},
{
"epoch": 0.14957264957264957,
"grad_norm": 0.31536754965782166,
"learning_rate": 3.062422067739485e-05,
"loss": 1.7282,
"num_input_tokens_seen": 174325760,
"step": 665
},
{
"epoch": 0.14979757085020243,
"grad_norm": 0.34157782793045044,
"learning_rate": 3.046344357553632e-05,
"loss": 1.6728,
"num_input_tokens_seen": 174587904,
"step": 666
},
{
"epoch": 0.15002249212775529,
"grad_norm": 0.2535393238067627,
"learning_rate": 3.0302904520452447e-05,
"loss": 1.5404,
"num_input_tokens_seen": 174850048,
"step": 667
},
{
"epoch": 0.15024741340530814,
"grad_norm": 0.40980955958366394,
"learning_rate": 3.0142605468260978e-05,
"loss": 1.3846,
"num_input_tokens_seen": 175112192,
"step": 668
},
{
"epoch": 0.150472334682861,
"grad_norm": 0.28387653827667236,
"learning_rate": 2.9982548372155263e-05,
"loss": 1.5931,
"num_input_tokens_seen": 175374336,
"step": 669
},
{
"epoch": 0.15069725596041386,
"grad_norm": 0.257438600063324,
"learning_rate": 2.9822735182380496e-05,
"loss": 1.2416,
"num_input_tokens_seen": 175636480,
"step": 670
},
{
"epoch": 0.15092217723796672,
"grad_norm": 0.3665998578071594,
"learning_rate": 2.9663167846209998e-05,
"loss": 1.0135,
"num_input_tokens_seen": 175898624,
"step": 671
},
{
"epoch": 0.15114709851551958,
"grad_norm": 0.32949748635292053,
"learning_rate": 2.950384830792136e-05,
"loss": 1.6333,
"num_input_tokens_seen": 176160768,
"step": 672
},
{
"epoch": 0.15137201979307244,
"grad_norm": 0.26206234097480774,
"learning_rate": 2.934477850877292e-05,
"loss": 1.581,
"num_input_tokens_seen": 176422912,
"step": 673
},
{
"epoch": 0.15159694107062527,
"grad_norm": 0.24233995378017426,
"learning_rate": 2.918596038697995e-05,
"loss": 1.556,
"num_input_tokens_seen": 176685056,
"step": 674
},
{
"epoch": 0.15182186234817813,
"grad_norm": 0.3522930145263672,
"learning_rate": 2.9027395877691144e-05,
"loss": 1.3463,
"num_input_tokens_seen": 176947200,
"step": 675
},
{
"epoch": 0.15204678362573099,
"grad_norm": 0.25320544838905334,
"learning_rate": 2.886908691296504e-05,
"loss": 1.6151,
"num_input_tokens_seen": 177209344,
"step": 676
},
{
"epoch": 0.15227170490328384,
"grad_norm": 0.3068138062953949,
"learning_rate": 2.8711035421746367e-05,
"loss": 1.5924,
"num_input_tokens_seen": 177471488,
"step": 677
},
{
"epoch": 0.1524966261808367,
"grad_norm": 0.9334062337875366,
"learning_rate": 2.8553243329842714e-05,
"loss": 1.3416,
"num_input_tokens_seen": 177733632,
"step": 678
},
{
"epoch": 0.15272154745838956,
"grad_norm": 0.34283843636512756,
"learning_rate": 2.8395712559900877e-05,
"loss": 1.313,
"num_input_tokens_seen": 177995776,
"step": 679
},
{
"epoch": 0.15294646873594242,
"grad_norm": 0.36047711968421936,
"learning_rate": 2.823844503138363e-05,
"loss": 1.283,
"num_input_tokens_seen": 178257920,
"step": 680
},
{
"epoch": 0.15317139001349528,
"grad_norm": 0.47825443744659424,
"learning_rate": 2.8081442660546125e-05,
"loss": 1.3074,
"num_input_tokens_seen": 178520064,
"step": 681
},
{
"epoch": 0.15339631129104814,
"grad_norm": 0.26862478256225586,
"learning_rate": 2.7924707360412746e-05,
"loss": 1.4357,
"num_input_tokens_seen": 178782208,
"step": 682
},
{
"epoch": 0.153621232568601,
"grad_norm": 0.6874963641166687,
"learning_rate": 2.776824104075364e-05,
"loss": 1.9579,
"num_input_tokens_seen": 179044352,
"step": 683
},
{
"epoch": 0.15384615384615385,
"grad_norm": 0.32274290919303894,
"learning_rate": 2.761204560806152e-05,
"loss": 1.1533,
"num_input_tokens_seen": 179306496,
"step": 684
},
{
"epoch": 0.1540710751237067,
"grad_norm": 0.5013816952705383,
"learning_rate": 2.7456122965528475e-05,
"loss": 2.2852,
"num_input_tokens_seen": 179568640,
"step": 685
},
{
"epoch": 0.15429599640125957,
"grad_norm": 0.23807933926582336,
"learning_rate": 2.7300475013022663e-05,
"loss": 0.9529,
"num_input_tokens_seen": 179830784,
"step": 686
},
{
"epoch": 0.1545209176788124,
"grad_norm": 0.39833179116249084,
"learning_rate": 2.7145103647065308e-05,
"loss": 1.4946,
"num_input_tokens_seen": 180092928,
"step": 687
},
{
"epoch": 0.15474583895636526,
"grad_norm": 0.2603168487548828,
"learning_rate": 2.699001076080742e-05,
"loss": 1.1422,
"num_input_tokens_seen": 180355072,
"step": 688
},
{
"epoch": 0.15497076023391812,
"grad_norm": 0.2643228769302368,
"learning_rate": 2.6835198244006927e-05,
"loss": 1.402,
"num_input_tokens_seen": 180617216,
"step": 689
},
{
"epoch": 0.15519568151147098,
"grad_norm": 0.3902268409729004,
"learning_rate": 2.668066798300545e-05,
"loss": 1.3769,
"num_input_tokens_seen": 180879360,
"step": 690
},
{
"epoch": 0.15542060278902384,
"grad_norm": 0.22581857442855835,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.8756,
"num_input_tokens_seen": 181141504,
"step": 691
},
{
"epoch": 0.1556455240665767,
"grad_norm": 0.2835070490837097,
"learning_rate": 2.6372461756547306e-05,
"loss": 1.6248,
"num_input_tokens_seen": 181403648,
"step": 692
},
{
"epoch": 0.15587044534412955,
"grad_norm": 0.3780955374240875,
"learning_rate": 2.6218789546486234e-05,
"loss": 1.7845,
"num_input_tokens_seen": 181665792,
"step": 693
},
{
"epoch": 0.1560953666216824,
"grad_norm": 0.2619756758213043,
"learning_rate": 2.6065407102969664e-05,
"loss": 1.6579,
"num_input_tokens_seen": 181927936,
"step": 694
},
{
"epoch": 0.15632028789923527,
"grad_norm": 0.27253520488739014,
"learning_rate": 2.591231629491423e-05,
"loss": 1.732,
"num_input_tokens_seen": 182190080,
"step": 695
},
{
"epoch": 0.15654520917678813,
"grad_norm": 0.5601329207420349,
"learning_rate": 2.575951898768315e-05,
"loss": 1.2634,
"num_input_tokens_seen": 182452224,
"step": 696
},
{
"epoch": 0.156770130454341,
"grad_norm": 0.19660979509353638,
"learning_rate": 2.560701704306336e-05,
"loss": 1.0427,
"num_input_tokens_seen": 182714368,
"step": 697
},
{
"epoch": 0.15699505173189385,
"grad_norm": 0.38262882828712463,
"learning_rate": 2.545481231924296e-05,
"loss": 1.6951,
"num_input_tokens_seen": 182976512,
"step": 698
},
{
"epoch": 0.1572199730094467,
"grad_norm": 0.25855690240859985,
"learning_rate": 2.5302906670788462e-05,
"loss": 1.2998,
"num_input_tokens_seen": 183238656,
"step": 699
},
{
"epoch": 0.15744489428699954,
"grad_norm": 0.3618425726890564,
"learning_rate": 2.5151301948622237e-05,
"loss": 1.4474,
"num_input_tokens_seen": 183500800,
"step": 700
},
{
"epoch": 0.1576698155645524,
"grad_norm": 0.2380896806716919,
"learning_rate": 2.500000000000001e-05,
"loss": 1.1,
"num_input_tokens_seen": 183762944,
"step": 701
},
{
"epoch": 0.15789473684210525,
"grad_norm": 0.27352097630500793,
"learning_rate": 2.4849002668488245e-05,
"loss": 1.2378,
"num_input_tokens_seen": 184025088,
"step": 702
},
{
"epoch": 0.1581196581196581,
"grad_norm": 0.241080641746521,
"learning_rate": 2.469831179394182e-05,
"loss": 1.6727,
"num_input_tokens_seen": 184287232,
"step": 703
},
{
"epoch": 0.15834457939721097,
"grad_norm": 0.5239187479019165,
"learning_rate": 2.4547929212481435e-05,
"loss": 1.0828,
"num_input_tokens_seen": 184549376,
"step": 704
},
{
"epoch": 0.15856950067476383,
"grad_norm": 0.21369491517543793,
"learning_rate": 2.4397856756471432e-05,
"loss": 0.8688,
"num_input_tokens_seen": 184811520,
"step": 705
},
{
"epoch": 0.1587944219523167,
"grad_norm": 0.3311217427253723,
"learning_rate": 2.4248096254497288e-05,
"loss": 1.04,
"num_input_tokens_seen": 185073664,
"step": 706
},
{
"epoch": 0.15901934322986955,
"grad_norm": 0.6069524884223938,
"learning_rate": 2.4098649531343497e-05,
"loss": 1.3251,
"num_input_tokens_seen": 185335808,
"step": 707
},
{
"epoch": 0.1592442645074224,
"grad_norm": 0.3160417377948761,
"learning_rate": 2.39495184079712e-05,
"loss": 1.514,
"num_input_tokens_seen": 185597952,
"step": 708
},
{
"epoch": 0.15946918578497526,
"grad_norm": 0.34821102023124695,
"learning_rate": 2.3800704701496053e-05,
"loss": 1.3157,
"num_input_tokens_seen": 185860096,
"step": 709
},
{
"epoch": 0.15969410706252812,
"grad_norm": 0.37680163979530334,
"learning_rate": 2.3652210225166122e-05,
"loss": 1.7779,
"num_input_tokens_seen": 186122240,
"step": 710
},
{
"epoch": 0.15991902834008098,
"grad_norm": 0.6597973108291626,
"learning_rate": 2.350403678833976e-05,
"loss": 1.0905,
"num_input_tokens_seen": 186384384,
"step": 711
},
{
"epoch": 0.16014394961763384,
"grad_norm": 0.2757372260093689,
"learning_rate": 2.33561861964635e-05,
"loss": 1.3679,
"num_input_tokens_seen": 186646528,
"step": 712
},
{
"epoch": 0.16036887089518667,
"grad_norm": 0.2820758819580078,
"learning_rate": 2.3208660251050158e-05,
"loss": 1.9283,
"num_input_tokens_seen": 186908672,
"step": 713
},
{
"epoch": 0.16059379217273953,
"grad_norm": 0.2349315583705902,
"learning_rate": 2.3061460749656844e-05,
"loss": 1.0785,
"num_input_tokens_seen": 187170816,
"step": 714
},
{
"epoch": 0.1608187134502924,
"grad_norm": 0.4976591169834137,
"learning_rate": 2.2914589485863014e-05,
"loss": 2.0155,
"num_input_tokens_seen": 187432960,
"step": 715
},
{
"epoch": 0.16104363472784525,
"grad_norm": 0.2699415683746338,
"learning_rate": 2.2768048249248648e-05,
"loss": 1.485,
"num_input_tokens_seen": 187695104,
"step": 716
},
{
"epoch": 0.1612685560053981,
"grad_norm": 0.33777254819869995,
"learning_rate": 2.2621838825372493e-05,
"loss": 1.2485,
"num_input_tokens_seen": 187957248,
"step": 717
},
{
"epoch": 0.16149347728295096,
"grad_norm": 0.27162325382232666,
"learning_rate": 2.247596299575022e-05,
"loss": 1.7082,
"num_input_tokens_seen": 188219392,
"step": 718
},
{
"epoch": 0.16171839856050382,
"grad_norm": 0.5944697856903076,
"learning_rate": 2.23304225378328e-05,
"loss": 1.4907,
"num_input_tokens_seen": 188481536,
"step": 719
},
{
"epoch": 0.16194331983805668,
"grad_norm": 0.5989211201667786,
"learning_rate": 2.218521922498476e-05,
"loss": 1.4793,
"num_input_tokens_seen": 188743680,
"step": 720
},
{
"epoch": 0.16216824111560954,
"grad_norm": 0.33278682827949524,
"learning_rate": 2.2040354826462668e-05,
"loss": 1.9338,
"num_input_tokens_seen": 189005824,
"step": 721
},
{
"epoch": 0.1623931623931624,
"grad_norm": 0.21896512806415558,
"learning_rate": 2.1895831107393484e-05,
"loss": 0.9001,
"num_input_tokens_seen": 189267968,
"step": 722
},
{
"epoch": 0.16261808367071526,
"grad_norm": 0.37337714433670044,
"learning_rate": 2.1751649828753106e-05,
"loss": 1.8342,
"num_input_tokens_seen": 189530112,
"step": 723
},
{
"epoch": 0.16284300494826812,
"grad_norm": 0.36519575119018555,
"learning_rate": 2.160781274734495e-05,
"loss": 1.9973,
"num_input_tokens_seen": 189792256,
"step": 724
},
{
"epoch": 0.16306792622582097,
"grad_norm": 0.3658199608325958,
"learning_rate": 2.1464321615778422e-05,
"loss": 1.4951,
"num_input_tokens_seen": 190054400,
"step": 725
},
{
"epoch": 0.1632928475033738,
"grad_norm": 0.5318383574485779,
"learning_rate": 2.132117818244771e-05,
"loss": 1.8081,
"num_input_tokens_seen": 190316544,
"step": 726
},
{
"epoch": 0.16351776878092666,
"grad_norm": 0.4171576499938965,
"learning_rate": 2.117838419151034e-05,
"loss": 1.2466,
"num_input_tokens_seen": 190578688,
"step": 727
},
{
"epoch": 0.16374269005847952,
"grad_norm": 0.2993619441986084,
"learning_rate": 2.103594138286607e-05,
"loss": 1.3636,
"num_input_tokens_seen": 190840832,
"step": 728
},
{
"epoch": 0.16396761133603238,
"grad_norm": 0.31914806365966797,
"learning_rate": 2.0893851492135537e-05,
"loss": 1.4931,
"num_input_tokens_seen": 191102976,
"step": 729
},
{
"epoch": 0.16419253261358524,
"grad_norm": 0.4262048006057739,
"learning_rate": 2.0752116250639225e-05,
"loss": 1.607,
"num_input_tokens_seen": 191365120,
"step": 730
},
{
"epoch": 0.1644174538911381,
"grad_norm": 0.2858421504497528,
"learning_rate": 2.061073738537635e-05,
"loss": 1.5553,
"num_input_tokens_seen": 191627264,
"step": 731
},
{
"epoch": 0.16464237516869096,
"grad_norm": 0.24495238065719604,
"learning_rate": 2.0469716619003725e-05,
"loss": 1.4652,
"num_input_tokens_seen": 191889408,
"step": 732
},
{
"epoch": 0.16486729644624382,
"grad_norm": 0.3603944480419159,
"learning_rate": 2.0329055669814934e-05,
"loss": 1.5141,
"num_input_tokens_seen": 192151552,
"step": 733
},
{
"epoch": 0.16509221772379667,
"grad_norm": 0.2774426341056824,
"learning_rate": 2.0188756251719203e-05,
"loss": 1.5936,
"num_input_tokens_seen": 192413696,
"step": 734
},
{
"epoch": 0.16531713900134953,
"grad_norm": 0.3978561460971832,
"learning_rate": 2.0048820074220715e-05,
"loss": 1.6921,
"num_input_tokens_seen": 192675840,
"step": 735
},
{
"epoch": 0.1655420602789024,
"grad_norm": 0.39905136823654175,
"learning_rate": 1.9909248842397584e-05,
"loss": 1.6292,
"num_input_tokens_seen": 192937984,
"step": 736
},
{
"epoch": 0.16576698155645525,
"grad_norm": 0.24895912408828735,
"learning_rate": 1.977004425688126e-05,
"loss": 1.4281,
"num_input_tokens_seen": 193200128,
"step": 737
},
{
"epoch": 0.1659919028340081,
"grad_norm": 0.3778083920478821,
"learning_rate": 1.9631208013835678e-05,
"loss": 1.4312,
"num_input_tokens_seen": 193462272,
"step": 738
},
{
"epoch": 0.16621682411156094,
"grad_norm": 0.8549690246582031,
"learning_rate": 1.9492741804936622e-05,
"loss": 1.5366,
"num_input_tokens_seen": 193724416,
"step": 739
},
{
"epoch": 0.1664417453891138,
"grad_norm": 0.2696658670902252,
"learning_rate": 1.9354647317351188e-05,
"loss": 1.452,
"num_input_tokens_seen": 193986560,
"step": 740
},
{
"epoch": 0.16666666666666666,
"grad_norm": 0.24030734598636627,
"learning_rate": 1.9216926233717085e-05,
"loss": 1.6674,
"num_input_tokens_seen": 194248704,
"step": 741
},
{
"epoch": 0.16689158794421952,
"grad_norm": 0.2628820538520813,
"learning_rate": 1.9079580232122303e-05,
"loss": 1.8918,
"num_input_tokens_seen": 194510848,
"step": 742
},
{
"epoch": 0.16711650922177237,
"grad_norm": 0.3136844038963318,
"learning_rate": 1.8942610986084486e-05,
"loss": 1.1222,
"num_input_tokens_seen": 194772992,
"step": 743
},
{
"epoch": 0.16734143049932523,
"grad_norm": 0.27816563844680786,
"learning_rate": 1.8806020164530702e-05,
"loss": 1.7347,
"num_input_tokens_seen": 195035136,
"step": 744
},
{
"epoch": 0.1675663517768781,
"grad_norm": 0.3501325845718384,
"learning_rate": 1.866980943177699e-05,
"loss": 1.9938,
"num_input_tokens_seen": 195297280,
"step": 745
},
{
"epoch": 0.16779127305443095,
"grad_norm": 0.32793718576431274,
"learning_rate": 1.8533980447508137e-05,
"loss": 1.5852,
"num_input_tokens_seen": 195559424,
"step": 746
},
{
"epoch": 0.1680161943319838,
"grad_norm": 0.26959773898124695,
"learning_rate": 1.8398534866757454e-05,
"loss": 1.4583,
"num_input_tokens_seen": 195821568,
"step": 747
},
{
"epoch": 0.16824111560953667,
"grad_norm": 0.29915785789489746,
"learning_rate": 1.8263474339886628e-05,
"loss": 1.7142,
"num_input_tokens_seen": 196083712,
"step": 748
},
{
"epoch": 0.16846603688708953,
"grad_norm": 0.37239888310432434,
"learning_rate": 1.8128800512565513e-05,
"loss": 1.6461,
"num_input_tokens_seen": 196345856,
"step": 749
},
{
"epoch": 0.16869095816464239,
"grad_norm": 0.31688031554222107,
"learning_rate": 1.7994515025752217e-05,
"loss": 1.4778,
"num_input_tokens_seen": 196608000,
"step": 750
},
{
"epoch": 0.16891587944219524,
"grad_norm": 0.32182300090789795,
"learning_rate": 1.7860619515673033e-05,
"loss": 1.5883,
"num_input_tokens_seen": 196870144,
"step": 751
},
{
"epoch": 0.16914080071974807,
"grad_norm": 0.2562112808227539,
"learning_rate": 1.7727115613802465e-05,
"loss": 1.409,
"num_input_tokens_seen": 197132288,
"step": 752
},
{
"epoch": 0.16936572199730093,
"grad_norm": 0.501336932182312,
"learning_rate": 1.7594004946843456e-05,
"loss": 1.5277,
"num_input_tokens_seen": 197394432,
"step": 753
},
{
"epoch": 0.1695906432748538,
"grad_norm": 0.2757022976875305,
"learning_rate": 1.746128913670746e-05,
"loss": 1.2632,
"num_input_tokens_seen": 197656576,
"step": 754
},
{
"epoch": 0.16981556455240665,
"grad_norm": 0.2268674522638321,
"learning_rate": 1.7328969800494726e-05,
"loss": 1.2268,
"num_input_tokens_seen": 197918720,
"step": 755
},
{
"epoch": 0.1700404858299595,
"grad_norm": 0.3350222706794739,
"learning_rate": 1.7197048550474643e-05,
"loss": 1.7233,
"num_input_tokens_seen": 198180864,
"step": 756
},
{
"epoch": 0.17026540710751237,
"grad_norm": 0.3129500150680542,
"learning_rate": 1.7065526994065973e-05,
"loss": 1.1204,
"num_input_tokens_seen": 198443008,
"step": 757
},
{
"epoch": 0.17049032838506523,
"grad_norm": 0.295625776052475,
"learning_rate": 1.6934406733817414e-05,
"loss": 0.8985,
"num_input_tokens_seen": 198705152,
"step": 758
},
{
"epoch": 0.17071524966261808,
"grad_norm": 0.26125019788742065,
"learning_rate": 1.680368936738792e-05,
"loss": 1.0675,
"num_input_tokens_seen": 198967296,
"step": 759
},
{
"epoch": 0.17094017094017094,
"grad_norm": 0.2649379074573517,
"learning_rate": 1.667337648752738e-05,
"loss": 1.4381,
"num_input_tokens_seen": 199229440,
"step": 760
},
{
"epoch": 0.1711650922177238,
"grad_norm": 0.24367214739322662,
"learning_rate": 1.6543469682057106e-05,
"loss": 1.4375,
"num_input_tokens_seen": 199491584,
"step": 761
},
{
"epoch": 0.17139001349527666,
"grad_norm": 0.4118271470069885,
"learning_rate": 1.6413970533850498e-05,
"loss": 1.8085,
"num_input_tokens_seen": 199753728,
"step": 762
},
{
"epoch": 0.17161493477282952,
"grad_norm": 0.24232937395572662,
"learning_rate": 1.6284880620813848e-05,
"loss": 0.9696,
"num_input_tokens_seen": 200015872,
"step": 763
},
{
"epoch": 0.17183985605038238,
"grad_norm": 0.2732202708721161,
"learning_rate": 1.615620151586697e-05,
"loss": 1.8716,
"num_input_tokens_seen": 200278016,
"step": 764
},
{
"epoch": 0.1720647773279352,
"grad_norm": 0.3352683186531067,
"learning_rate": 1.602793478692419e-05,
"loss": 1.9496,
"num_input_tokens_seen": 200540160,
"step": 765
},
{
"epoch": 0.17228969860548807,
"grad_norm": 0.35812363028526306,
"learning_rate": 1.5900081996875083e-05,
"loss": 1.3767,
"num_input_tokens_seen": 200802304,
"step": 766
},
{
"epoch": 0.17251461988304093,
"grad_norm": 0.24810630083084106,
"learning_rate": 1.5772644703565565e-05,
"loss": 1.4532,
"num_input_tokens_seen": 201064448,
"step": 767
},
{
"epoch": 0.17273954116059378,
"grad_norm": 0.2576179504394531,
"learning_rate": 1.5645624459778856e-05,
"loss": 1.1053,
"num_input_tokens_seen": 201326592,
"step": 768
},
{
"epoch": 0.17296446243814664,
"grad_norm": 0.45622938871383667,
"learning_rate": 1.551902281321651e-05,
"loss": 1.6785,
"num_input_tokens_seen": 201588736,
"step": 769
},
{
"epoch": 0.1731893837156995,
"grad_norm": 0.35794106125831604,
"learning_rate": 1.5392841306479666e-05,
"loss": 1.004,
"num_input_tokens_seen": 201850880,
"step": 770
},
{
"epoch": 0.17341430499325236,
"grad_norm": 0.34276673197746277,
"learning_rate": 1.526708147705013e-05,
"loss": 1.2476,
"num_input_tokens_seen": 202113024,
"step": 771
},
{
"epoch": 0.17363922627080522,
"grad_norm": 0.318548321723938,
"learning_rate": 1.5141744857271778e-05,
"loss": 1.6177,
"num_input_tokens_seen": 202375168,
"step": 772
},
{
"epoch": 0.17386414754835808,
"grad_norm": 0.2709994316101074,
"learning_rate": 1.5016832974331724e-05,
"loss": 1.4153,
"num_input_tokens_seen": 202637312,
"step": 773
},
{
"epoch": 0.17408906882591094,
"grad_norm": 0.23188814520835876,
"learning_rate": 1.4892347350241881e-05,
"loss": 1.1132,
"num_input_tokens_seen": 202899456,
"step": 774
},
{
"epoch": 0.1743139901034638,
"grad_norm": 0.23023724555969238,
"learning_rate": 1.4768289501820265e-05,
"loss": 1.0796,
"num_input_tokens_seen": 203161600,
"step": 775
},
{
"epoch": 0.17453891138101665,
"grad_norm": 0.23310570418834686,
"learning_rate": 1.4644660940672627e-05,
"loss": 0.9287,
"num_input_tokens_seen": 203423744,
"step": 776
},
{
"epoch": 0.1747638326585695,
"grad_norm": 0.47127315402030945,
"learning_rate": 1.4521463173173965e-05,
"loss": 1.7062,
"num_input_tokens_seen": 203685888,
"step": 777
},
{
"epoch": 0.17498875393612237,
"grad_norm": 0.2425903081893921,
"learning_rate": 1.439869770045018e-05,
"loss": 1.509,
"num_input_tokens_seen": 203948032,
"step": 778
},
{
"epoch": 0.1752136752136752,
"grad_norm": 0.29599592089653015,
"learning_rate": 1.4276366018359844e-05,
"loss": 1.4202,
"num_input_tokens_seen": 204210176,
"step": 779
},
{
"epoch": 0.17543859649122806,
"grad_norm": 0.4893275499343872,
"learning_rate": 1.4154469617475863e-05,
"loss": 1.2288,
"num_input_tokens_seen": 204472320,
"step": 780
},
{
"epoch": 0.17566351776878092,
"grad_norm": 0.6782318353652954,
"learning_rate": 1.4033009983067452e-05,
"loss": 1.5115,
"num_input_tokens_seen": 204734464,
"step": 781
},
{
"epoch": 0.17588843904633378,
"grad_norm": 0.359073668718338,
"learning_rate": 1.3911988595081893e-05,
"loss": 2.0383,
"num_input_tokens_seen": 204996608,
"step": 782
},
{
"epoch": 0.17611336032388664,
"grad_norm": 0.35052844882011414,
"learning_rate": 1.3791406928126638e-05,
"loss": 1.364,
"num_input_tokens_seen": 205258752,
"step": 783
},
{
"epoch": 0.1763382816014395,
"grad_norm": 0.3411898910999298,
"learning_rate": 1.367126645145121e-05,
"loss": 1.9237,
"num_input_tokens_seen": 205520896,
"step": 784
},
{
"epoch": 0.17656320287899235,
"grad_norm": 0.28995242714881897,
"learning_rate": 1.3551568628929434e-05,
"loss": 1.9925,
"num_input_tokens_seen": 205783040,
"step": 785
},
{
"epoch": 0.1767881241565452,
"grad_norm": 0.2879210412502289,
"learning_rate": 1.3432314919041478e-05,
"loss": 1.4678,
"num_input_tokens_seen": 206045184,
"step": 786
},
{
"epoch": 0.17701304543409807,
"grad_norm": 0.6357771158218384,
"learning_rate": 1.3313506774856177e-05,
"loss": 1.8581,
"num_input_tokens_seen": 206307328,
"step": 787
},
{
"epoch": 0.17723796671165093,
"grad_norm": 0.2830122113227844,
"learning_rate": 1.3195145644013285e-05,
"loss": 1.3386,
"num_input_tokens_seen": 206569472,
"step": 788
},
{
"epoch": 0.1774628879892038,
"grad_norm": 0.486817866563797,
"learning_rate": 1.3077232968705805e-05,
"loss": 1.3974,
"num_input_tokens_seen": 206831616,
"step": 789
},
{
"epoch": 0.17768780926675665,
"grad_norm": 0.27353304624557495,
"learning_rate": 1.29597701856625e-05,
"loss": 1.7853,
"num_input_tokens_seen": 207093760,
"step": 790
},
{
"epoch": 0.1779127305443095,
"grad_norm": 0.2737731337547302,
"learning_rate": 1.2842758726130283e-05,
"loss": 1.5908,
"num_input_tokens_seen": 207355904,
"step": 791
},
{
"epoch": 0.17813765182186234,
"grad_norm": 0.28885483741760254,
"learning_rate": 1.2726200015856892e-05,
"loss": 1.3893,
"num_input_tokens_seen": 207618048,
"step": 792
},
{
"epoch": 0.1783625730994152,
"grad_norm": 0.3733929991722107,
"learning_rate": 1.2610095475073414e-05,
"loss": 1.5299,
"num_input_tokens_seen": 207880192,
"step": 793
},
{
"epoch": 0.17858749437696805,
"grad_norm": 0.27227166295051575,
"learning_rate": 1.2494446518477022e-05,
"loss": 1.3791,
"num_input_tokens_seen": 208142336,
"step": 794
},
{
"epoch": 0.1788124156545209,
"grad_norm": 0.41097357869148254,
"learning_rate": 1.2379254555213788e-05,
"loss": 0.7853,
"num_input_tokens_seen": 208404480,
"step": 795
},
{
"epoch": 0.17903733693207377,
"grad_norm": 0.4259317219257355,
"learning_rate": 1.22645209888614e-05,
"loss": 1.1717,
"num_input_tokens_seen": 208666624,
"step": 796
},
{
"epoch": 0.17926225820962663,
"grad_norm": 0.2950097918510437,
"learning_rate": 1.2150247217412186e-05,
"loss": 1.7082,
"num_input_tokens_seen": 208928768,
"step": 797
},
{
"epoch": 0.1794871794871795,
"grad_norm": 0.2869075834751129,
"learning_rate": 1.203643463325596e-05,
"loss": 1.7419,
"num_input_tokens_seen": 209190912,
"step": 798
},
{
"epoch": 0.17971210076473235,
"grad_norm": 0.41858625411987305,
"learning_rate": 1.1923084623163172e-05,
"loss": 1.5839,
"num_input_tokens_seen": 209453056,
"step": 799
},
{
"epoch": 0.1799370220422852,
"grad_norm": 0.22646434605121613,
"learning_rate": 1.1810198568267905e-05,
"loss": 1.3877,
"num_input_tokens_seen": 209715200,
"step": 800
},
{
"epoch": 0.18016194331983806,
"grad_norm": 0.25302788615226746,
"learning_rate": 1.1697777844051105e-05,
"loss": 1.417,
"num_input_tokens_seen": 209977344,
"step": 801
},
{
"epoch": 0.18038686459739092,
"grad_norm": 0.269723117351532,
"learning_rate": 1.1585823820323843e-05,
"loss": 1.3995,
"num_input_tokens_seen": 210239488,
"step": 802
},
{
"epoch": 0.18061178587494378,
"grad_norm": 0.32225891947746277,
"learning_rate": 1.1474337861210543e-05,
"loss": 1.5246,
"num_input_tokens_seen": 210501632,
"step": 803
},
{
"epoch": 0.18083670715249664,
"grad_norm": 0.2819434702396393,
"learning_rate": 1.1363321325132447e-05,
"loss": 1.6224,
"num_input_tokens_seen": 210763776,
"step": 804
},
{
"epoch": 0.18106162843004947,
"grad_norm": 0.21161556243896484,
"learning_rate": 1.1252775564791024e-05,
"loss": 0.8181,
"num_input_tokens_seen": 211025920,
"step": 805
},
{
"epoch": 0.18128654970760233,
"grad_norm": 0.2685690224170685,
"learning_rate": 1.1142701927151456e-05,
"loss": 0.9464,
"num_input_tokens_seen": 211288064,
"step": 806
},
{
"epoch": 0.1815114709851552,
"grad_norm": 0.4813108742237091,
"learning_rate": 1.1033101753426283e-05,
"loss": 1.6346,
"num_input_tokens_seen": 211550208,
"step": 807
},
{
"epoch": 0.18173639226270805,
"grad_norm": 0.3357571065425873,
"learning_rate": 1.0923976379059058e-05,
"loss": 1.8358,
"num_input_tokens_seen": 211812352,
"step": 808
},
{
"epoch": 0.1819613135402609,
"grad_norm": 0.2748908996582031,
"learning_rate": 1.0815327133708015e-05,
"loss": 1.6321,
"num_input_tokens_seen": 212074496,
"step": 809
},
{
"epoch": 0.18218623481781376,
"grad_norm": 0.32630112767219543,
"learning_rate": 1.0707155341229901e-05,
"loss": 1.17,
"num_input_tokens_seen": 212336640,
"step": 810
},
{
"epoch": 0.18241115609536662,
"grad_norm": 0.29258018732070923,
"learning_rate": 1.0599462319663905e-05,
"loss": 1.0201,
"num_input_tokens_seen": 212598784,
"step": 811
},
{
"epoch": 0.18263607737291948,
"grad_norm": 0.2514300048351288,
"learning_rate": 1.049224938121548e-05,
"loss": 0.9086,
"num_input_tokens_seen": 212860928,
"step": 812
},
{
"epoch": 0.18286099865047234,
"grad_norm": 0.30031341314315796,
"learning_rate": 1.0385517832240471e-05,
"loss": 0.7005,
"num_input_tokens_seen": 213123072,
"step": 813
},
{
"epoch": 0.1830859199280252,
"grad_norm": 0.38122648000717163,
"learning_rate": 1.0279268973229089e-05,
"loss": 0.9653,
"num_input_tokens_seen": 213385216,
"step": 814
},
{
"epoch": 0.18331084120557806,
"grad_norm": 0.319106787443161,
"learning_rate": 1.0173504098790187e-05,
"loss": 1.1675,
"num_input_tokens_seen": 213647360,
"step": 815
},
{
"epoch": 0.18353576248313092,
"grad_norm": 0.2643660008907318,
"learning_rate": 1.006822449763537e-05,
"loss": 1.5398,
"num_input_tokens_seen": 213909504,
"step": 816
},
{
"epoch": 0.18376068376068377,
"grad_norm": 0.2221175879240036,
"learning_rate": 9.963431452563332e-06,
"loss": 1.2352,
"num_input_tokens_seen": 214171648,
"step": 817
},
{
"epoch": 0.1839856050382366,
"grad_norm": 0.636135458946228,
"learning_rate": 9.859126240444283e-06,
"loss": 1.3639,
"num_input_tokens_seen": 214433792,
"step": 818
},
{
"epoch": 0.18421052631578946,
"grad_norm": 0.27148714661598206,
"learning_rate": 9.755310132204298e-06,
"loss": 1.2762,
"num_input_tokens_seen": 214695936,
"step": 819
},
{
"epoch": 0.18443544759334232,
"grad_norm": 0.2437310367822647,
"learning_rate": 9.651984392809914e-06,
"loss": 2.0397,
"num_input_tokens_seen": 214958080,
"step": 820
},
{
"epoch": 0.18466036887089518,
"grad_norm": 0.2802148461341858,
"learning_rate": 9.549150281252633e-06,
"loss": 1.5733,
"num_input_tokens_seen": 215220224,
"step": 821
},
{
"epoch": 0.18488529014844804,
"grad_norm": 0.39606234431266785,
"learning_rate": 9.446809050533678e-06,
"loss": 1.2963,
"num_input_tokens_seen": 215482368,
"step": 822
},
{
"epoch": 0.1851102114260009,
"grad_norm": 0.4500436782836914,
"learning_rate": 9.344961947648623e-06,
"loss": 1.9574,
"num_input_tokens_seen": 215744512,
"step": 823
},
{
"epoch": 0.18533513270355376,
"grad_norm": 0.3091246783733368,
"learning_rate": 9.243610213572285e-06,
"loss": 1.5413,
"num_input_tokens_seen": 216006656,
"step": 824
},
{
"epoch": 0.18556005398110662,
"grad_norm": 0.2845333516597748,
"learning_rate": 9.142755083243576e-06,
"loss": 2.1405,
"num_input_tokens_seen": 216268800,
"step": 825
},
{
"epoch": 0.18578497525865947,
"grad_norm": 0.47833433747291565,
"learning_rate": 9.042397785550405e-06,
"loss": 2.0458,
"num_input_tokens_seen": 216530944,
"step": 826
},
{
"epoch": 0.18600989653621233,
"grad_norm": 0.2986997067928314,
"learning_rate": 8.9425395433148e-06,
"loss": 1.3242,
"num_input_tokens_seen": 216793088,
"step": 827
},
{
"epoch": 0.1862348178137652,
"grad_norm": 0.2789519131183624,
"learning_rate": 8.843181573277902e-06,
"loss": 1.8162,
"num_input_tokens_seen": 217055232,
"step": 828
},
{
"epoch": 0.18645973909131805,
"grad_norm": 0.3428088128566742,
"learning_rate": 8.744325086085248e-06,
"loss": 1.5293,
"num_input_tokens_seen": 217317376,
"step": 829
},
{
"epoch": 0.1866846603688709,
"grad_norm": 0.37942934036254883,
"learning_rate": 8.645971286271904e-06,
"loss": 1.3963,
"num_input_tokens_seen": 217579520,
"step": 830
},
{
"epoch": 0.18690958164642374,
"grad_norm": 0.257756769657135,
"learning_rate": 8.548121372247918e-06,
"loss": 1.4567,
"num_input_tokens_seen": 217841664,
"step": 831
},
{
"epoch": 0.1871345029239766,
"grad_norm": 0.2964596748352051,
"learning_rate": 8.450776536283594e-06,
"loss": 1.4137,
"num_input_tokens_seen": 218103808,
"step": 832
},
{
"epoch": 0.18735942420152946,
"grad_norm": 0.3498225510120392,
"learning_rate": 8.353937964495029e-06,
"loss": 1.8612,
"num_input_tokens_seen": 218365952,
"step": 833
},
{
"epoch": 0.18758434547908232,
"grad_norm": 3.0258142948150635,
"learning_rate": 8.257606836829678e-06,
"loss": 1.6101,
"num_input_tokens_seen": 218628096,
"step": 834
},
{
"epoch": 0.18780926675663517,
"grad_norm": 0.2801116108894348,
"learning_rate": 8.16178432705192e-06,
"loss": 0.9278,
"num_input_tokens_seen": 218890240,
"step": 835
},
{
"epoch": 0.18803418803418803,
"grad_norm": 0.3314686119556427,
"learning_rate": 8.066471602728803e-06,
"loss": 1.9387,
"num_input_tokens_seen": 219152384,
"step": 836
},
{
"epoch": 0.1882591093117409,
"grad_norm": 0.348878413438797,
"learning_rate": 7.971669825215788e-06,
"loss": 1.322,
"num_input_tokens_seen": 219414528,
"step": 837
},
{
"epoch": 0.18848403058929375,
"grad_norm": 0.24432994425296783,
"learning_rate": 7.877380149642626e-06,
"loss": 1.3546,
"num_input_tokens_seen": 219676672,
"step": 838
},
{
"epoch": 0.1887089518668466,
"grad_norm": 0.320366770029068,
"learning_rate": 7.783603724899257e-06,
"loss": 1.5225,
"num_input_tokens_seen": 219938816,
"step": 839
},
{
"epoch": 0.18893387314439947,
"grad_norm": 0.2689548432826996,
"learning_rate": 7.690341693621805e-06,
"loss": 1.5467,
"num_input_tokens_seen": 220200960,
"step": 840
},
{
"epoch": 0.18915879442195233,
"grad_norm": 0.2117762565612793,
"learning_rate": 7.597595192178702e-06,
"loss": 1.0383,
"num_input_tokens_seen": 220463104,
"step": 841
},
{
"epoch": 0.18938371569950518,
"grad_norm": 0.26459306478500366,
"learning_rate": 7.505365350656812e-06,
"loss": 1.8679,
"num_input_tokens_seen": 220725248,
"step": 842
},
{
"epoch": 0.18960863697705804,
"grad_norm": 0.28376010060310364,
"learning_rate": 7.413653292847617e-06,
"loss": 1.6637,
"num_input_tokens_seen": 220987392,
"step": 843
},
{
"epoch": 0.18983355825461087,
"grad_norm": 0.3108251392841339,
"learning_rate": 7.322460136233622e-06,
"loss": 2.0003,
"num_input_tokens_seen": 221249536,
"step": 844
},
{
"epoch": 0.19005847953216373,
"grad_norm": 0.3655540645122528,
"learning_rate": 7.2317869919746705e-06,
"loss": 1.499,
"num_input_tokens_seen": 221511680,
"step": 845
},
{
"epoch": 0.1902834008097166,
"grad_norm": 1.4836020469665527,
"learning_rate": 7.1416349648943894e-06,
"loss": 1.6247,
"num_input_tokens_seen": 221773824,
"step": 846
},
{
"epoch": 0.19050832208726945,
"grad_norm": 1.6627871990203857,
"learning_rate": 7.052005153466779e-06,
"loss": 1.7312,
"num_input_tokens_seen": 222035968,
"step": 847
},
{
"epoch": 0.1907332433648223,
"grad_norm": 0.2904163897037506,
"learning_rate": 6.962898649802823e-06,
"loss": 1.4695,
"num_input_tokens_seen": 222298112,
"step": 848
},
{
"epoch": 0.19095816464237517,
"grad_norm": 0.3054295480251312,
"learning_rate": 6.874316539637127e-06,
"loss": 1.5799,
"num_input_tokens_seen": 222560256,
"step": 849
},
{
"epoch": 0.19118308591992803,
"grad_norm": 0.269171804189682,
"learning_rate": 6.786259902314768e-06,
"loss": 1.3127,
"num_input_tokens_seen": 222822400,
"step": 850
},
{
"epoch": 0.19140800719748088,
"grad_norm": 0.29726889729499817,
"learning_rate": 6.698729810778065e-06,
"loss": 1.7892,
"num_input_tokens_seen": 223084544,
"step": 851
},
{
"epoch": 0.19163292847503374,
"grad_norm": 0.3201026916503906,
"learning_rate": 6.611727331553586e-06,
"loss": 1.3596,
"num_input_tokens_seen": 223346688,
"step": 852
},
{
"epoch": 0.1918578497525866,
"grad_norm": 0.3100236654281616,
"learning_rate": 6.52525352473905e-06,
"loss": 1.1841,
"num_input_tokens_seen": 223608832,
"step": 853
},
{
"epoch": 0.19208277103013946,
"grad_norm": 0.428475946187973,
"learning_rate": 6.439309443990532e-06,
"loss": 2.2884,
"num_input_tokens_seen": 223870976,
"step": 854
},
{
"epoch": 0.19230769230769232,
"grad_norm": 0.28759607672691345,
"learning_rate": 6.353896136509524e-06,
"loss": 1.6141,
"num_input_tokens_seen": 224133120,
"step": 855
},
{
"epoch": 0.19253261358524518,
"grad_norm": 0.23132166266441345,
"learning_rate": 6.269014643030213e-06,
"loss": 0.8094,
"num_input_tokens_seen": 224395264,
"step": 856
},
{
"epoch": 0.192757534862798,
"grad_norm": 0.2636610269546509,
"learning_rate": 6.184665997806832e-06,
"loss": 1.4375,
"num_input_tokens_seen": 224657408,
"step": 857
},
{
"epoch": 0.19298245614035087,
"grad_norm": 0.24574293196201324,
"learning_rate": 6.100851228600973e-06,
"loss": 1.0708,
"num_input_tokens_seen": 224919552,
"step": 858
},
{
"epoch": 0.19320737741790373,
"grad_norm": 0.22492122650146484,
"learning_rate": 6.017571356669183e-06,
"loss": 1.2114,
"num_input_tokens_seen": 225181696,
"step": 859
},
{
"epoch": 0.19343229869545658,
"grad_norm": 0.3934623897075653,
"learning_rate": 5.934827396750392e-06,
"loss": 2.2453,
"num_input_tokens_seen": 225443840,
"step": 860
},
{
"epoch": 0.19365721997300944,
"grad_norm": 0.2976413667201996,
"learning_rate": 5.852620357053651e-06,
"loss": 1.529,
"num_input_tokens_seen": 225705984,
"step": 861
},
{
"epoch": 0.1938821412505623,
"grad_norm": 0.2744722068309784,
"learning_rate": 5.770951239245803e-06,
"loss": 1.1937,
"num_input_tokens_seen": 225968128,
"step": 862
},
{
"epoch": 0.19410706252811516,
"grad_norm": 0.2966805100440979,
"learning_rate": 5.689821038439263e-06,
"loss": 1.6629,
"num_input_tokens_seen": 226230272,
"step": 863
},
{
"epoch": 0.19433198380566802,
"grad_norm": 0.2719520926475525,
"learning_rate": 5.6092307431799384e-06,
"loss": 1.2716,
"num_input_tokens_seen": 226492416,
"step": 864
},
{
"epoch": 0.19455690508322088,
"grad_norm": 0.2147417515516281,
"learning_rate": 5.529181335435124e-06,
"loss": 0.9605,
"num_input_tokens_seen": 226754560,
"step": 865
},
{
"epoch": 0.19478182636077374,
"grad_norm": 0.3258356750011444,
"learning_rate": 5.449673790581611e-06,
"loss": 1.8107,
"num_input_tokens_seen": 227016704,
"step": 866
},
{
"epoch": 0.1950067476383266,
"grad_norm": 0.27222108840942383,
"learning_rate": 5.370709077393721e-06,
"loss": 1.3168,
"num_input_tokens_seen": 227278848,
"step": 867
},
{
"epoch": 0.19523166891587945,
"grad_norm": 0.2095833271741867,
"learning_rate": 5.292288158031594e-06,
"loss": 1.3649,
"num_input_tokens_seen": 227540992,
"step": 868
},
{
"epoch": 0.1954565901934323,
"grad_norm": 0.31206318736076355,
"learning_rate": 5.214411988029355e-06,
"loss": 1.3449,
"num_input_tokens_seen": 227803136,
"step": 869
},
{
"epoch": 0.19568151147098514,
"grad_norm": 0.3639450669288635,
"learning_rate": 5.137081516283581e-06,
"loss": 1.6845,
"num_input_tokens_seen": 228065280,
"step": 870
},
{
"epoch": 0.195906432748538,
"grad_norm": 0.2893346846103668,
"learning_rate": 5.060297685041659e-06,
"loss": 1.2223,
"num_input_tokens_seen": 228327424,
"step": 871
},
{
"epoch": 0.19613135402609086,
"grad_norm": 0.21746279299259186,
"learning_rate": 4.984061429890324e-06,
"loss": 1.1666,
"num_input_tokens_seen": 228589568,
"step": 872
},
{
"epoch": 0.19635627530364372,
"grad_norm": 0.26073527336120605,
"learning_rate": 4.908373679744316e-06,
"loss": 1.7673,
"num_input_tokens_seen": 228851712,
"step": 873
},
{
"epoch": 0.19658119658119658,
"grad_norm": 0.5019742250442505,
"learning_rate": 4.833235356834959e-06,
"loss": 0.8521,
"num_input_tokens_seen": 229113856,
"step": 874
},
{
"epoch": 0.19680611785874944,
"grad_norm": 0.3473358750343323,
"learning_rate": 4.758647376699032e-06,
"loss": 1.3868,
"num_input_tokens_seen": 229376000,
"step": 875
},
{
"epoch": 0.1970310391363023,
"grad_norm": 0.32887259125709534,
"learning_rate": 4.684610648167503e-06,
"loss": 1.5076,
"num_input_tokens_seen": 229638144,
"step": 876
},
{
"epoch": 0.19725596041385515,
"grad_norm": 1.0063308477401733,
"learning_rate": 4.611126073354571e-06,
"loss": 1.422,
"num_input_tokens_seen": 229900288,
"step": 877
},
{
"epoch": 0.197480881691408,
"grad_norm": 0.21128523349761963,
"learning_rate": 4.538194547646574e-06,
"loss": 1.4598,
"num_input_tokens_seen": 230162432,
"step": 878
},
{
"epoch": 0.19770580296896087,
"grad_norm": 0.28047022223472595,
"learning_rate": 4.465816959691149e-06,
"loss": 1.0795,
"num_input_tokens_seen": 230424576,
"step": 879
},
{
"epoch": 0.19793072424651373,
"grad_norm": 0.38452693819999695,
"learning_rate": 4.3939941913863525e-06,
"loss": 1.6362,
"num_input_tokens_seen": 230686720,
"step": 880
},
{
"epoch": 0.1981556455240666,
"grad_norm": 0.27827146649360657,
"learning_rate": 4.322727117869951e-06,
"loss": 1.4264,
"num_input_tokens_seen": 230948864,
"step": 881
},
{
"epoch": 0.19838056680161945,
"grad_norm": 0.2739766836166382,
"learning_rate": 4.2520166075087635e-06,
"loss": 1.2018,
"num_input_tokens_seen": 231211008,
"step": 882
},
{
"epoch": 0.19860548807917228,
"grad_norm": 0.21906918287277222,
"learning_rate": 4.181863521888019e-06,
"loss": 0.8336,
"num_input_tokens_seen": 231473152,
"step": 883
},
{
"epoch": 0.19883040935672514,
"grad_norm": 0.4255640208721161,
"learning_rate": 4.112268715800943e-06,
"loss": 1.2028,
"num_input_tokens_seen": 231735296,
"step": 884
},
{
"epoch": 0.199055330634278,
"grad_norm": 0.24651283025741577,
"learning_rate": 4.043233037238281e-06,
"loss": 1.2193,
"num_input_tokens_seen": 231997440,
"step": 885
},
{
"epoch": 0.19928025191183085,
"grad_norm": 0.7165899872779846,
"learning_rate": 3.974757327377981e-06,
"loss": 1.5189,
"num_input_tokens_seen": 232259584,
"step": 886
},
{
"epoch": 0.1995051731893837,
"grad_norm": 0.30502429604530334,
"learning_rate": 3.90684242057498e-06,
"loss": 1.6756,
"num_input_tokens_seen": 232521728,
"step": 887
},
{
"epoch": 0.19973009446693657,
"grad_norm": 0.3749113380908966,
"learning_rate": 3.839489144350955e-06,
"loss": 1.5954,
"num_input_tokens_seen": 232783872,
"step": 888
},
{
"epoch": 0.19995501574448943,
"grad_norm": 0.29872778058052063,
"learning_rate": 3.772698319384349e-06,
"loss": 0.999,
"num_input_tokens_seen": 233046016,
"step": 889
},
{
"epoch": 0.2001799370220423,
"grad_norm": 0.47089534997940063,
"learning_rate": 3.7064707595002635e-06,
"loss": 1.6206,
"num_input_tokens_seen": 233308160,
"step": 890
},
{
"epoch": 0.20040485829959515,
"grad_norm": 0.4469537138938904,
"learning_rate": 3.6408072716606346e-06,
"loss": 1.3996,
"num_input_tokens_seen": 233570304,
"step": 891
},
{
"epoch": 0.200629779577148,
"grad_norm": 0.283834308385849,
"learning_rate": 3.575708655954324e-06,
"loss": 1.3198,
"num_input_tokens_seen": 233832448,
"step": 892
},
{
"epoch": 0.20085470085470086,
"grad_norm": 0.4261209964752197,
"learning_rate": 3.511175705587433e-06,
"loss": 1.1136,
"num_input_tokens_seen": 234094592,
"step": 893
},
{
"epoch": 0.20107962213225372,
"grad_norm": 0.2869652211666107,
"learning_rate": 3.4472092068735916e-06,
"loss": 1.0926,
"num_input_tokens_seen": 234356736,
"step": 894
},
{
"epoch": 0.20130454340980658,
"grad_norm": 0.18117663264274597,
"learning_rate": 3.3838099392243916e-06,
"loss": 0.9276,
"num_input_tokens_seen": 234618880,
"step": 895
},
{
"epoch": 0.2015294646873594,
"grad_norm": 0.49210941791534424,
"learning_rate": 3.3209786751399187e-06,
"loss": 1.5335,
"num_input_tokens_seen": 234881024,
"step": 896
},
{
"epoch": 0.20175438596491227,
"grad_norm": 0.35008394718170166,
"learning_rate": 3.258716180199278e-06,
"loss": 1.8602,
"num_input_tokens_seen": 235143168,
"step": 897
},
{
"epoch": 0.20197930724246513,
"grad_norm": 0.4784037470817566,
"learning_rate": 3.197023213051337e-06,
"loss": 1.5308,
"num_input_tokens_seen": 235405312,
"step": 898
},
{
"epoch": 0.202204228520018,
"grad_norm": 0.2999178171157837,
"learning_rate": 3.1359005254054273e-06,
"loss": 1.5131,
"num_input_tokens_seen": 235667456,
"step": 899
},
{
"epoch": 0.20242914979757085,
"grad_norm": 0.3838978707790375,
"learning_rate": 3.0753488620222037e-06,
"loss": 1.6273,
"num_input_tokens_seen": 235929600,
"step": 900
},
{
"epoch": 0.2026540710751237,
"grad_norm": 0.30641281604766846,
"learning_rate": 3.0153689607045845e-06,
"loss": 1.7154,
"num_input_tokens_seen": 236191744,
"step": 901
},
{
"epoch": 0.20287899235267656,
"grad_norm": 0.379870742559433,
"learning_rate": 2.9559615522887273e-06,
"loss": 1.8713,
"num_input_tokens_seen": 236453888,
"step": 902
},
{
"epoch": 0.20310391363022942,
"grad_norm": 0.31836485862731934,
"learning_rate": 2.8971273606351658e-06,
"loss": 1.4777,
"num_input_tokens_seen": 236716032,
"step": 903
},
{
"epoch": 0.20332883490778228,
"grad_norm": 0.2330200970172882,
"learning_rate": 2.8388671026199522e-06,
"loss": 1.17,
"num_input_tokens_seen": 236978176,
"step": 904
},
{
"epoch": 0.20355375618533514,
"grad_norm": 0.2553198039531708,
"learning_rate": 2.7811814881259503e-06,
"loss": 0.7473,
"num_input_tokens_seen": 237240320,
"step": 905
},
{
"epoch": 0.203778677462888,
"grad_norm": 0.2429661601781845,
"learning_rate": 2.724071220034158e-06,
"loss": 1.6603,
"num_input_tokens_seen": 237502464,
"step": 906
},
{
"epoch": 0.20400359874044086,
"grad_norm": 0.23130248486995697,
"learning_rate": 2.667536994215186e-06,
"loss": 1.1438,
"num_input_tokens_seen": 237764608,
"step": 907
},
{
"epoch": 0.20422852001799371,
"grad_norm": 0.4475621283054352,
"learning_rate": 2.611579499520722e-06,
"loss": 1.3467,
"num_input_tokens_seen": 238026752,
"step": 908
},
{
"epoch": 0.20445344129554655,
"grad_norm": 0.4158174991607666,
"learning_rate": 2.5561994177751737e-06,
"loss": 1.9419,
"num_input_tokens_seen": 238288896,
"step": 909
},
{
"epoch": 0.2046783625730994,
"grad_norm": 0.3838602602481842,
"learning_rate": 2.501397423767382e-06,
"loss": 1.4355,
"num_input_tokens_seen": 238551040,
"step": 910
},
{
"epoch": 0.20490328385065226,
"grad_norm": 0.28037744760513306,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.4613,
"num_input_tokens_seen": 238813184,
"step": 911
},
{
"epoch": 0.20512820512820512,
"grad_norm": 0.370734304189682,
"learning_rate": 2.3935303628930707e-06,
"loss": 1.5199,
"num_input_tokens_seen": 239075328,
"step": 912
},
{
"epoch": 0.20535312640575798,
"grad_norm": 0.38821879029273987,
"learning_rate": 2.340466610352654e-06,
"loss": 1.5737,
"num_input_tokens_seen": 239337472,
"step": 913
},
{
"epoch": 0.20557804768331084,
"grad_norm": 0.2714470624923706,
"learning_rate": 2.2879835741861586e-06,
"loss": 1.9517,
"num_input_tokens_seen": 239599616,
"step": 914
},
{
"epoch": 0.2058029689608637,
"grad_norm": 0.2324371039867401,
"learning_rate": 2.2360818938828187e-06,
"loss": 1.3721,
"num_input_tokens_seen": 239861760,
"step": 915
},
{
"epoch": 0.20602789023841656,
"grad_norm": 0.27908140420913696,
"learning_rate": 2.1847622018482283e-06,
"loss": 1.8328,
"num_input_tokens_seen": 240123904,
"step": 916
},
{
"epoch": 0.20625281151596941,
"grad_norm": 0.28744250535964966,
"learning_rate": 2.134025123396638e-06,
"loss": 1.7052,
"num_input_tokens_seen": 240386048,
"step": 917
},
{
"epoch": 0.20647773279352227,
"grad_norm": 0.28908395767211914,
"learning_rate": 2.0838712767433375e-06,
"loss": 1.5337,
"num_input_tokens_seen": 240648192,
"step": 918
},
{
"epoch": 0.20670265407107513,
"grad_norm": 0.3027624189853668,
"learning_rate": 2.0343012729971243e-06,
"loss": 1.5177,
"num_input_tokens_seen": 240910336,
"step": 919
},
{
"epoch": 0.206927575348628,
"grad_norm": 0.22034700214862823,
"learning_rate": 1.985315716152847e-06,
"loss": 0.9706,
"num_input_tokens_seen": 241172480,
"step": 920
},
{
"epoch": 0.20715249662618085,
"grad_norm": 0.2736298739910126,
"learning_rate": 1.9369152030840556e-06,
"loss": 1.5079,
"num_input_tokens_seen": 241434624,
"step": 921
},
{
"epoch": 0.20737741790373368,
"grad_norm": 0.39781084656715393,
"learning_rate": 1.8891003235357308e-06,
"loss": 1.5232,
"num_input_tokens_seen": 241696768,
"step": 922
},
{
"epoch": 0.20760233918128654,
"grad_norm": 0.3146691620349884,
"learning_rate": 1.841871660117095e-06,
"loss": 1.32,
"num_input_tokens_seen": 241958912,
"step": 923
},
{
"epoch": 0.2078272604588394,
"grad_norm": 0.34817108511924744,
"learning_rate": 1.7952297882945003e-06,
"loss": 1.1133,
"num_input_tokens_seen": 242221056,
"step": 924
},
{
"epoch": 0.20805218173639226,
"grad_norm": 0.4052412211894989,
"learning_rate": 1.7491752763844293e-06,
"loss": 1.3553,
"num_input_tokens_seen": 242483200,
"step": 925
},
{
"epoch": 0.20827710301394511,
"grad_norm": 0.28856000304222107,
"learning_rate": 1.70370868554659e-06,
"loss": 1.3245,
"num_input_tokens_seen": 242745344,
"step": 926
},
{
"epoch": 0.20850202429149797,
"grad_norm": 0.2518732249736786,
"learning_rate": 1.6588305697770313e-06,
"loss": 1.2752,
"num_input_tokens_seen": 243007488,
"step": 927
},
{
"epoch": 0.20872694556905083,
"grad_norm": 0.2444835901260376,
"learning_rate": 1.6145414759014431e-06,
"loss": 1.1666,
"num_input_tokens_seen": 243269632,
"step": 928
},
{
"epoch": 0.2089518668466037,
"grad_norm": 0.23631195724010468,
"learning_rate": 1.5708419435684462e-06,
"loss": 1.2224,
"num_input_tokens_seen": 243531776,
"step": 929
},
{
"epoch": 0.20917678812415655,
"grad_norm": 2.2602646350860596,
"learning_rate": 1.5277325052430568e-06,
"loss": 1.954,
"num_input_tokens_seen": 243793920,
"step": 930
},
{
"epoch": 0.2094017094017094,
"grad_norm": 0.31157422065734863,
"learning_rate": 1.4852136862001764e-06,
"loss": 1.781,
"num_input_tokens_seen": 244056064,
"step": 931
},
{
"epoch": 0.20962663067926227,
"grad_norm": 0.3314647078514099,
"learning_rate": 1.4432860045182017e-06,
"loss": 2.0094,
"num_input_tokens_seen": 244318208,
"step": 932
},
{
"epoch": 0.20985155195681512,
"grad_norm": 0.2697344422340393,
"learning_rate": 1.4019499710726913e-06,
"loss": 1.1265,
"num_input_tokens_seen": 244580352,
"step": 933
},
{
"epoch": 0.21007647323436798,
"grad_norm": 0.7156555652618408,
"learning_rate": 1.3612060895301759e-06,
"loss": 1.3521,
"num_input_tokens_seen": 244842496,
"step": 934
},
{
"epoch": 0.21030139451192081,
"grad_norm": 0.20836858451366425,
"learning_rate": 1.3210548563419856e-06,
"loss": 0.9915,
"num_input_tokens_seen": 245104640,
"step": 935
},
{
"epoch": 0.21052631578947367,
"grad_norm": 0.47020024061203003,
"learning_rate": 1.2814967607382432e-06,
"loss": 1.2631,
"num_input_tokens_seen": 245366784,
"step": 936
},
{
"epoch": 0.21075123706702653,
"grad_norm": 0.5516555905342102,
"learning_rate": 1.2425322847218368e-06,
"loss": 1.318,
"num_input_tokens_seen": 245628928,
"step": 937
},
{
"epoch": 0.2109761583445794,
"grad_norm": 0.2941971719264984,
"learning_rate": 1.2041619030626284e-06,
"loss": 1.2588,
"num_input_tokens_seen": 245891072,
"step": 938
},
{
"epoch": 0.21120107962213225,
"grad_norm": 0.3142881691455841,
"learning_rate": 1.166386083291604e-06,
"loss": 1.5491,
"num_input_tokens_seen": 246153216,
"step": 939
},
{
"epoch": 0.2114260008996851,
"grad_norm": 0.33687466382980347,
"learning_rate": 1.1292052856952062e-06,
"loss": 1.7112,
"num_input_tokens_seen": 246415360,
"step": 940
},
{
"epoch": 0.21165092217723797,
"grad_norm": 0.23561953008174896,
"learning_rate": 1.0926199633097157e-06,
"loss": 1.3672,
"num_input_tokens_seen": 246677504,
"step": 941
},
{
"epoch": 0.21187584345479082,
"grad_norm": 0.24282871186733246,
"learning_rate": 1.0566305619157502e-06,
"loss": 1.5446,
"num_input_tokens_seen": 246939648,
"step": 942
},
{
"epoch": 0.21210076473234368,
"grad_norm": 0.3530779778957367,
"learning_rate": 1.0212375200327973e-06,
"loss": 1.623,
"num_input_tokens_seen": 247201792,
"step": 943
},
{
"epoch": 0.21232568600989654,
"grad_norm": 0.2710437774658203,
"learning_rate": 9.864412689139123e-07,
"loss": 1.0911,
"num_input_tokens_seen": 247463936,
"step": 944
},
{
"epoch": 0.2125506072874494,
"grad_norm": 0.3001916706562042,
"learning_rate": 9.522422325404235e-07,
"loss": 2.0527,
"num_input_tokens_seen": 247726080,
"step": 945
},
{
"epoch": 0.21277552856500226,
"grad_norm": 0.5507198572158813,
"learning_rate": 9.186408276168013e-07,
"loss": 1.3133,
"num_input_tokens_seen": 247988224,
"step": 946
},
{
"epoch": 0.21300044984255512,
"grad_norm": 0.4351663887500763,
"learning_rate": 8.856374635655695e-07,
"loss": 1.5783,
"num_input_tokens_seen": 248250368,
"step": 947
},
{
"epoch": 0.21322537112010798,
"grad_norm": 0.352603942155838,
"learning_rate": 8.53232542522292e-07,
"loss": 1.1274,
"num_input_tokens_seen": 248512512,
"step": 948
},
{
"epoch": 0.2134502923976608,
"grad_norm": 0.25265082716941833,
"learning_rate": 8.214264593307098e-07,
"loss": 0.9922,
"num_input_tokens_seen": 248774656,
"step": 949
},
{
"epoch": 0.21367521367521367,
"grad_norm": 0.27736958861351013,
"learning_rate": 7.90219601537906e-07,
"loss": 1.0984,
"num_input_tokens_seen": 249036800,
"step": 950
},
{
"epoch": 0.21390013495276652,
"grad_norm": 0.22481057047843933,
"learning_rate": 7.596123493895991e-07,
"loss": 1.6848,
"num_input_tokens_seen": 249298944,
"step": 951
},
{
"epoch": 0.21412505623031938,
"grad_norm": 0.514091432094574,
"learning_rate": 7.296050758254957e-07,
"loss": 1.6769,
"num_input_tokens_seen": 249561088,
"step": 952
},
{
"epoch": 0.21434997750787224,
"grad_norm": 0.39093077182769775,
"learning_rate": 7.001981464747565e-07,
"loss": 1.7747,
"num_input_tokens_seen": 249823232,
"step": 953
},
{
"epoch": 0.2145748987854251,
"grad_norm": 0.3265654444694519,
"learning_rate": 6.713919196515317e-07,
"loss": 1.3205,
"num_input_tokens_seen": 250085376,
"step": 954
},
{
"epoch": 0.21479982006297796,
"grad_norm": 0.25656867027282715,
"learning_rate": 6.431867463506048e-07,
"loss": 1.0319,
"num_input_tokens_seen": 250347520,
"step": 955
},
{
"epoch": 0.21502474134053082,
"grad_norm": 0.31202375888824463,
"learning_rate": 6.15582970243117e-07,
"loss": 1.5286,
"num_input_tokens_seen": 250609664,
"step": 956
},
{
"epoch": 0.21524966261808368,
"grad_norm": 0.2782678008079529,
"learning_rate": 5.885809276723608e-07,
"loss": 1.8345,
"num_input_tokens_seen": 250871808,
"step": 957
},
{
"epoch": 0.21547458389563653,
"grad_norm": 0.2971605062484741,
"learning_rate": 5.621809476497098e-07,
"loss": 1.0085,
"num_input_tokens_seen": 251133952,
"step": 958
},
{
"epoch": 0.2156995051731894,
"grad_norm": 0.2913457751274109,
"learning_rate": 5.363833518505834e-07,
"loss": 1.1763,
"num_input_tokens_seen": 251396096,
"step": 959
},
{
"epoch": 0.21592442645074225,
"grad_norm": 0.38938575983047485,
"learning_rate": 5.111884546105506e-07,
"loss": 1.1027,
"num_input_tokens_seen": 251658240,
"step": 960
},
{
"epoch": 0.2161493477282951,
"grad_norm": 0.22164541482925415,
"learning_rate": 4.865965629214819e-07,
"loss": 0.7649,
"num_input_tokens_seen": 251920384,
"step": 961
},
{
"epoch": 0.21637426900584794,
"grad_norm": 0.47079649567604065,
"learning_rate": 4.6260797642782014e-07,
"loss": 1.5604,
"num_input_tokens_seen": 252182528,
"step": 962
},
{
"epoch": 0.2165991902834008,
"grad_norm": 0.27684617042541504,
"learning_rate": 4.392229874229159e-07,
"loss": 1.3192,
"num_input_tokens_seen": 252444672,
"step": 963
},
{
"epoch": 0.21682411156095366,
"grad_norm": 0.7537292838096619,
"learning_rate": 4.1644188084548063e-07,
"loss": 1.6492,
"num_input_tokens_seen": 252706816,
"step": 964
},
{
"epoch": 0.21704903283850652,
"grad_norm": 0.26928630471229553,
"learning_rate": 3.9426493427611177e-07,
"loss": 1.325,
"num_input_tokens_seen": 252968960,
"step": 965
},
{
"epoch": 0.21727395411605938,
"grad_norm": 0.2575208842754364,
"learning_rate": 3.7269241793390085e-07,
"loss": 1.5905,
"num_input_tokens_seen": 253231104,
"step": 966
},
{
"epoch": 0.21749887539361223,
"grad_norm": 0.23125195503234863,
"learning_rate": 3.517245946731529e-07,
"loss": 1.2487,
"num_input_tokens_seen": 253493248,
"step": 967
},
{
"epoch": 0.2177237966711651,
"grad_norm": 0.3252660036087036,
"learning_rate": 3.3136171998017775e-07,
"loss": 1.4039,
"num_input_tokens_seen": 253755392,
"step": 968
},
{
"epoch": 0.21794871794871795,
"grad_norm": 0.25830793380737305,
"learning_rate": 3.1160404197018154e-07,
"loss": 1.2268,
"num_input_tokens_seen": 254017536,
"step": 969
},
{
"epoch": 0.2181736392262708,
"grad_norm": 0.25022169947624207,
"learning_rate": 2.924518013842303e-07,
"loss": 1.4004,
"num_input_tokens_seen": 254279680,
"step": 970
},
{
"epoch": 0.21839856050382367,
"grad_norm": 0.2369910031557083,
"learning_rate": 2.7390523158633554e-07,
"loss": 1.3025,
"num_input_tokens_seen": 254541824,
"step": 971
},
{
"epoch": 0.21862348178137653,
"grad_norm": 0.28356534242630005,
"learning_rate": 2.5596455856058963e-07,
"loss": 1.8499,
"num_input_tokens_seen": 254803968,
"step": 972
},
{
"epoch": 0.21884840305892939,
"grad_norm": 0.27577951550483704,
"learning_rate": 2.386300009084408e-07,
"loss": 0.9913,
"num_input_tokens_seen": 255066112,
"step": 973
},
{
"epoch": 0.21907332433648224,
"grad_norm": 0.6673069000244141,
"learning_rate": 2.219017698460002e-07,
"loss": 1.4078,
"num_input_tokens_seen": 255328256,
"step": 974
},
{
"epoch": 0.21929824561403508,
"grad_norm": 0.27162426710128784,
"learning_rate": 2.057800692014833e-07,
"loss": 1.8649,
"num_input_tokens_seen": 255590400,
"step": 975
},
{
"epoch": 0.21952316689158793,
"grad_norm": 0.46019431948661804,
"learning_rate": 1.9026509541272275e-07,
"loss": 1.4329,
"num_input_tokens_seen": 255852544,
"step": 976
},
{
"epoch": 0.2197480881691408,
"grad_norm": 0.28555309772491455,
"learning_rate": 1.753570375247815e-07,
"loss": 1.6036,
"num_input_tokens_seen": 256114688,
"step": 977
},
{
"epoch": 0.21997300944669365,
"grad_norm": 0.3448443114757538,
"learning_rate": 1.6105607718764347e-07,
"loss": 1.2266,
"num_input_tokens_seen": 256376832,
"step": 978
},
{
"epoch": 0.2201979307242465,
"grad_norm": 0.32888004183769226,
"learning_rate": 1.4736238865398765e-07,
"loss": 1.4776,
"num_input_tokens_seen": 256638976,
"step": 979
},
{
"epoch": 0.22042285200179937,
"grad_norm": 0.4627322256565094,
"learning_rate": 1.342761387770952e-07,
"loss": 1.3864,
"num_input_tokens_seen": 256901120,
"step": 980
},
{
"epoch": 0.22064777327935223,
"grad_norm": 0.3258611559867859,
"learning_rate": 1.2179748700879012e-07,
"loss": 1.3963,
"num_input_tokens_seen": 257163264,
"step": 981
},
{
"epoch": 0.22087269455690509,
"grad_norm": 0.47918933629989624,
"learning_rate": 1.0992658539750178e-07,
"loss": 1.6036,
"num_input_tokens_seen": 257425408,
"step": 982
},
{
"epoch": 0.22109761583445794,
"grad_norm": 0.22977593541145325,
"learning_rate": 9.866357858642205e-08,
"loss": 1.4893,
"num_input_tokens_seen": 257687552,
"step": 983
},
{
"epoch": 0.2213225371120108,
"grad_norm": 0.28588688373565674,
"learning_rate": 8.800860381173448e-08,
"loss": 1.9434,
"num_input_tokens_seen": 257949696,
"step": 984
},
{
"epoch": 0.22154745838956366,
"grad_norm": 0.25447309017181396,
"learning_rate": 7.796179090094891e-08,
"loss": 1.3682,
"num_input_tokens_seen": 258211840,
"step": 985
},
{
"epoch": 0.22177237966711652,
"grad_norm": 0.2756679058074951,
"learning_rate": 6.852326227130834e-08,
"loss": 1.2565,
"num_input_tokens_seen": 258473984,
"step": 986
},
{
"epoch": 0.22199730094466938,
"grad_norm": 0.40191200375556946,
"learning_rate": 5.969313292830125e-08,
"loss": 1.8224,
"num_input_tokens_seen": 258736128,
"step": 987
},
{
"epoch": 0.2222222222222222,
"grad_norm": 0.36071062088012695,
"learning_rate": 5.1471510464268236e-08,
"loss": 1.7145,
"num_input_tokens_seen": 258998272,
"step": 988
},
{
"epoch": 0.22244714349977507,
"grad_norm": 0.24019958078861237,
"learning_rate": 4.385849505708084e-08,
"loss": 1.6252,
"num_input_tokens_seen": 259260416,
"step": 989
},
{
"epoch": 0.22267206477732793,
"grad_norm": 0.3141970932483673,
"learning_rate": 3.685417946894254e-08,
"loss": 1.1591,
"num_input_tokens_seen": 259522560,
"step": 990
},
{
"epoch": 0.22289698605488079,
"grad_norm": 0.2234763503074646,
"learning_rate": 3.04586490452119e-08,
"loss": 1.467,
"num_input_tokens_seen": 259784704,
"step": 991
},
{
"epoch": 0.22312190733243364,
"grad_norm": 0.45924562215805054,
"learning_rate": 2.467198171342e-08,
"loss": 1.3947,
"num_input_tokens_seen": 260046848,
"step": 992
},
{
"epoch": 0.2233468286099865,
"grad_norm": 0.30663082003593445,
"learning_rate": 1.949424798228239e-08,
"loss": 0.8988,
"num_input_tokens_seen": 260308992,
"step": 993
},
{
"epoch": 0.22357174988753936,
"grad_norm": 0.3375454545021057,
"learning_rate": 1.4925510940844156e-08,
"loss": 1.8355,
"num_input_tokens_seen": 260571136,
"step": 994
},
{
"epoch": 0.22379667116509222,
"grad_norm": 0.28863683342933655,
"learning_rate": 1.096582625772502e-08,
"loss": 1.0786,
"num_input_tokens_seen": 260833280,
"step": 995
},
{
"epoch": 0.22402159244264508,
"grad_norm": 0.3245282471179962,
"learning_rate": 7.615242180436522e-09,
"loss": 1.0296,
"num_input_tokens_seen": 261095424,
"step": 996
},
{
"epoch": 0.22424651372019794,
"grad_norm": 0.3670142590999603,
"learning_rate": 4.873799534788059e-09,
"loss": 1.6158,
"num_input_tokens_seen": 261357568,
"step": 997
},
{
"epoch": 0.2244714349977508,
"grad_norm": 0.22898231446743011,
"learning_rate": 2.741531724392843e-09,
"loss": 1.0204,
"num_input_tokens_seen": 261619712,
"step": 998
},
{
"epoch": 0.22469635627530365,
"grad_norm": 0.28812992572784424,
"learning_rate": 1.2184647302626583e-09,
"loss": 1.9515,
"num_input_tokens_seen": 261881856,
"step": 999
},
{
"epoch": 0.2249212775528565,
"grad_norm": 0.2852112948894501,
"learning_rate": 3.0461711048035415e-10,
"loss": 2.1085,
"num_input_tokens_seen": 262144000,
"step": 1000
},
{
"epoch": 0.2249212775528565,
"num_input_tokens_seen": 262144000,
"step": 1000,
"total_flos": 5.715019849269248e+18,
"train_loss": 1.4907063425183296,
"train_runtime": 24201.9893,
"train_samples_per_second": 0.331,
"train_steps_per_second": 0.041
}
],
"logging_steps": 1.0,
"max_steps": 1000,
"num_input_tokens_seen": 262144000,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.715019849269248e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}