{
"best_global_step": 1400,
"best_metric": 0.6364541053771973,
"best_model_checkpoint": "/scr/dhanda/projects/sampling_inference/trained_models/aswin_llama_star/checkpoint-1400",
"epoch": 1.0,
"eval_steps": 100,
"global_step": 1441,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006939625260235947,
"grad_norm": 8.436548233032227,
"learning_rate": 2.7586206896551725e-06,
"loss": 0.8313,
"step": 10
},
{
"epoch": 0.013879250520471894,
"grad_norm": 5.655324935913086,
"learning_rate": 6.206896551724138e-06,
"loss": 0.5995,
"step": 20
},
{
"epoch": 0.020818875780707843,
"grad_norm": 6.097832202911377,
"learning_rate": 9.655172413793103e-06,
"loss": 0.5897,
"step": 30
},
{
"epoch": 0.027758501040943788,
"grad_norm": 5.244992256164551,
"learning_rate": 1.310344827586207e-05,
"loss": 0.5975,
"step": 40
},
{
"epoch": 0.03469812630117974,
"grad_norm": 5.82904577255249,
"learning_rate": 1.6551724137931037e-05,
"loss": 0.5866,
"step": 50
},
{
"epoch": 0.041637751561415685,
"grad_norm": 4.755958557128906,
"learning_rate": 2e-05,
"loss": 0.6143,
"step": 60
},
{
"epoch": 0.048577376821651634,
"grad_norm": 4.591907024383545,
"learning_rate": 2.3448275862068967e-05,
"loss": 0.6709,
"step": 70
},
{
"epoch": 0.055517002081887576,
"grad_norm": 5.520663738250732,
"learning_rate": 2.689655172413793e-05,
"loss": 0.6815,
"step": 80
},
{
"epoch": 0.062456627342123525,
"grad_norm": 5.258326530456543,
"learning_rate": 3.0344827586206897e-05,
"loss": 0.7171,
"step": 90
},
{
"epoch": 0.06939625260235947,
"grad_norm": 4.56249475479126,
"learning_rate": 3.3793103448275865e-05,
"loss": 0.7289,
"step": 100
},
{
"epoch": 0.06939625260235947,
"eval_loss": 0.7279375195503235,
"eval_runtime": 49.9788,
"eval_samples_per_second": 28.832,
"eval_steps_per_second": 7.223,
"step": 100
},
{
"epoch": 0.07633587786259542,
"grad_norm": 3.349087715148926,
"learning_rate": 3.724137931034483e-05,
"loss": 0.7553,
"step": 110
},
{
"epoch": 0.08327550312283137,
"grad_norm": 3.4430437088012695,
"learning_rate": 4.0689655172413795e-05,
"loss": 0.7228,
"step": 120
},
{
"epoch": 0.09021512838306732,
"grad_norm": 4.375394821166992,
"learning_rate": 4.413793103448276e-05,
"loss": 0.8678,
"step": 130
},
{
"epoch": 0.09715475364330327,
"grad_norm": 4.496102333068848,
"learning_rate": 4.7586206896551725e-05,
"loss": 0.8478,
"step": 140
},
{
"epoch": 0.1040943789035392,
"grad_norm": 4.770753383636475,
"learning_rate": 4.988425925925926e-05,
"loss": 0.824,
"step": 150
},
{
"epoch": 0.11103400416377515,
"grad_norm": 4.819082260131836,
"learning_rate": 4.949845679012346e-05,
"loss": 0.8833,
"step": 160
},
{
"epoch": 0.1179736294240111,
"grad_norm": 5.851232528686523,
"learning_rate": 4.911265432098766e-05,
"loss": 0.8861,
"step": 170
},
{
"epoch": 0.12491325468424705,
"grad_norm": 3.9091575145721436,
"learning_rate": 4.8726851851851855e-05,
"loss": 0.9043,
"step": 180
},
{
"epoch": 0.131852879944483,
"grad_norm": 4.504231929779053,
"learning_rate": 4.834104938271605e-05,
"loss": 0.909,
"step": 190
},
{
"epoch": 0.13879250520471895,
"grad_norm": 5.178035259246826,
"learning_rate": 4.795524691358025e-05,
"loss": 0.8768,
"step": 200
},
{
"epoch": 0.13879250520471895,
"eval_loss": 0.8475821018218994,
"eval_runtime": 49.8841,
"eval_samples_per_second": 28.887,
"eval_steps_per_second": 7.237,
"step": 200
},
{
"epoch": 0.1457321304649549,
"grad_norm": 4.2687907218933105,
"learning_rate": 4.756944444444444e-05,
"loss": 0.7687,
"step": 210
},
{
"epoch": 0.15267175572519084,
"grad_norm": 3.8715438842773438,
"learning_rate": 4.7183641975308646e-05,
"loss": 0.8323,
"step": 220
},
{
"epoch": 0.1596113809854268,
"grad_norm": 5.992573261260986,
"learning_rate": 4.679783950617284e-05,
"loss": 0.829,
"step": 230
},
{
"epoch": 0.16655100624566274,
"grad_norm": 3.7021474838256836,
"learning_rate": 4.6412037037037034e-05,
"loss": 0.8627,
"step": 240
},
{
"epoch": 0.1734906315058987,
"grad_norm": 3.4208054542541504,
"learning_rate": 4.602623456790124e-05,
"loss": 0.9109,
"step": 250
},
{
"epoch": 0.18043025676613464,
"grad_norm": 3.720872163772583,
"learning_rate": 4.5640432098765436e-05,
"loss": 0.8255,
"step": 260
},
{
"epoch": 0.1873698820263706,
"grad_norm": 3.8880999088287354,
"learning_rate": 4.525462962962963e-05,
"loss": 0.8802,
"step": 270
},
{
"epoch": 0.19430950728660654,
"grad_norm": 3.356327772140503,
"learning_rate": 4.486882716049383e-05,
"loss": 0.9039,
"step": 280
},
{
"epoch": 0.20124913254684246,
"grad_norm": 3.6388580799102783,
"learning_rate": 4.448302469135803e-05,
"loss": 0.8776,
"step": 290
},
{
"epoch": 0.2081887578070784,
"grad_norm": 4.57245397567749,
"learning_rate": 4.4097222222222226e-05,
"loss": 0.8568,
"step": 300
},
{
"epoch": 0.2081887578070784,
"eval_loss": 0.8598970770835876,
"eval_runtime": 50.2014,
"eval_samples_per_second": 28.704,
"eval_steps_per_second": 7.191,
"step": 300
},
{
"epoch": 0.21512838306731435,
"grad_norm": 3.0655972957611084,
"learning_rate": 4.3711419753086424e-05,
"loss": 0.8248,
"step": 310
},
{
"epoch": 0.2220680083275503,
"grad_norm": 3.4082236289978027,
"learning_rate": 4.332561728395062e-05,
"loss": 0.8737,
"step": 320
},
{
"epoch": 0.22900763358778625,
"grad_norm": 4.789612770080566,
"learning_rate": 4.293981481481482e-05,
"loss": 0.8218,
"step": 330
},
{
"epoch": 0.2359472588480222,
"grad_norm": 3.7866945266723633,
"learning_rate": 4.255401234567901e-05,
"loss": 0.8208,
"step": 340
},
{
"epoch": 0.24288688410825815,
"grad_norm": 4.31322717666626,
"learning_rate": 4.2168209876543214e-05,
"loss": 0.7825,
"step": 350
},
{
"epoch": 0.2498265093684941,
"grad_norm": 3.4791786670684814,
"learning_rate": 4.178240740740741e-05,
"loss": 0.8161,
"step": 360
},
{
"epoch": 0.25676613462873005,
"grad_norm": 4.521151542663574,
"learning_rate": 4.13966049382716e-05,
"loss": 0.7744,
"step": 370
},
{
"epoch": 0.263705759888966,
"grad_norm": 2.1991310119628906,
"learning_rate": 4.104938271604938e-05,
"loss": 0.8381,
"step": 380
},
{
"epoch": 0.27064538514920194,
"grad_norm": 3.745811939239502,
"learning_rate": 4.066358024691358e-05,
"loss": 0.9447,
"step": 390
},
{
"epoch": 0.2775850104094379,
"grad_norm": 3.5998716354370117,
"learning_rate": 4.027777777777778e-05,
"loss": 0.8452,
"step": 400
},
{
"epoch": 0.2775850104094379,
"eval_loss": 0.8371486067771912,
"eval_runtime": 50.1497,
"eval_samples_per_second": 28.734,
"eval_steps_per_second": 7.198,
"step": 400
},
{
"epoch": 0.28452463566967384,
"grad_norm": 4.008808612823486,
"learning_rate": 3.9891975308641976e-05,
"loss": 0.8101,
"step": 410
},
{
"epoch": 0.2914642609299098,
"grad_norm": 4.993364334106445,
"learning_rate": 3.950617283950617e-05,
"loss": 0.9292,
"step": 420
},
{
"epoch": 0.29840388619014574,
"grad_norm": 3.597254514694214,
"learning_rate": 3.912037037037037e-05,
"loss": 0.8176,
"step": 430
},
{
"epoch": 0.3053435114503817,
"grad_norm": 4.86491584777832,
"learning_rate": 3.873456790123457e-05,
"loss": 0.7595,
"step": 440
},
{
"epoch": 0.31228313671061764,
"grad_norm": 4.191522121429443,
"learning_rate": 3.8348765432098766e-05,
"loss": 0.8463,
"step": 450
},
{
"epoch": 0.3192227619708536,
"grad_norm": 3.8313114643096924,
"learning_rate": 3.7962962962962964e-05,
"loss": 0.815,
"step": 460
},
{
"epoch": 0.32616238723108953,
"grad_norm": 2.289886951446533,
"learning_rate": 3.757716049382716e-05,
"loss": 0.7598,
"step": 470
},
{
"epoch": 0.3331020124913255,
"grad_norm": 3.6466236114501953,
"learning_rate": 3.719135802469136e-05,
"loss": 0.7771,
"step": 480
},
{
"epoch": 0.34004163775156143,
"grad_norm": 3.2481741905212402,
"learning_rate": 3.6805555555555556e-05,
"loss": 0.7833,
"step": 490
},
{
"epoch": 0.3469812630117974,
"grad_norm": 5.6007208824157715,
"learning_rate": 3.6419753086419754e-05,
"loss": 0.8243,
"step": 500
},
{
"epoch": 0.3469812630117974,
"eval_loss": 0.8030129671096802,
"eval_runtime": 50.3084,
"eval_samples_per_second": 28.643,
"eval_steps_per_second": 7.176,
"step": 500
},
{
"epoch": 0.35392088827203333,
"grad_norm": 4.155673503875732,
"learning_rate": 3.603395061728395e-05,
"loss": 0.7968,
"step": 510
},
{
"epoch": 0.3608605135322693,
"grad_norm": 3.114962577819824,
"learning_rate": 3.564814814814815e-05,
"loss": 0.815,
"step": 520
},
{
"epoch": 0.3678001387925052,
"grad_norm": 4.049103736877441,
"learning_rate": 3.526234567901235e-05,
"loss": 0.8768,
"step": 530
},
{
"epoch": 0.3747397640527412,
"grad_norm": 3.64631724357605,
"learning_rate": 3.4876543209876545e-05,
"loss": 0.7909,
"step": 540
},
{
"epoch": 0.3816793893129771,
"grad_norm": 3.093494176864624,
"learning_rate": 3.449074074074074e-05,
"loss": 0.7576,
"step": 550
},
{
"epoch": 0.3886190145732131,
"grad_norm": 3.2251219749450684,
"learning_rate": 3.410493827160494e-05,
"loss": 0.813,
"step": 560
},
{
"epoch": 0.39555863983344897,
"grad_norm": 2.702892303466797,
"learning_rate": 3.371913580246914e-05,
"loss": 0.7765,
"step": 570
},
{
"epoch": 0.4024982650936849,
"grad_norm": 3.3482439517974854,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.8653,
"step": 580
},
{
"epoch": 0.40943789035392086,
"grad_norm": 2.537188768386841,
"learning_rate": 3.294753086419753e-05,
"loss": 0.7409,
"step": 590
},
{
"epoch": 0.4163775156141568,
"grad_norm": 3.178288459777832,
"learning_rate": 3.256172839506173e-05,
"loss": 0.7191,
"step": 600
},
{
"epoch": 0.4163775156141568,
"eval_loss": 0.7933737635612488,
"eval_runtime": 50.9506,
"eval_samples_per_second": 28.282,
"eval_steps_per_second": 7.085,
"step": 600
},
{
"epoch": 0.42331714087439276,
"grad_norm": 3.4489400386810303,
"learning_rate": 3.221450617283951e-05,
"loss": 0.8456,
"step": 610
},
{
"epoch": 0.4302567661346287,
"grad_norm": 3.6795783042907715,
"learning_rate": 3.182870370370371e-05,
"loss": 0.8392,
"step": 620
},
{
"epoch": 0.43719639139486466,
"grad_norm": 2.7610459327697754,
"learning_rate": 3.14429012345679e-05,
"loss": 0.7238,
"step": 630
},
{
"epoch": 0.4441360166551006,
"grad_norm": 3.525322675704956,
"learning_rate": 3.10570987654321e-05,
"loss": 0.8437,
"step": 640
},
{
"epoch": 0.45107564191533656,
"grad_norm": 4.020907402038574,
"learning_rate": 3.06712962962963e-05,
"loss": 0.8097,
"step": 650
},
{
"epoch": 0.4580152671755725,
"grad_norm": 4.816994667053223,
"learning_rate": 3.0285493827160495e-05,
"loss": 0.7754,
"step": 660
},
{
"epoch": 0.46495489243580845,
"grad_norm": 2.7212016582489014,
"learning_rate": 2.9899691358024696e-05,
"loss": 0.7264,
"step": 670
},
{
"epoch": 0.4718945176960444,
"grad_norm": 2.6413140296936035,
"learning_rate": 2.951388888888889e-05,
"loss": 0.7636,
"step": 680
},
{
"epoch": 0.47883414295628035,
"grad_norm": 3.3349661827087402,
"learning_rate": 2.9128086419753087e-05,
"loss": 0.7971,
"step": 690
},
{
"epoch": 0.4857737682165163,
"grad_norm": 3.037588119506836,
"learning_rate": 2.8780864197530867e-05,
"loss": 0.7778,
"step": 700
},
{
"epoch": 0.4857737682165163,
"eval_loss": 0.7744709849357605,
"eval_runtime": 50.6673,
"eval_samples_per_second": 28.44,
"eval_steps_per_second": 7.125,
"step": 700
},
{
"epoch": 0.49271339347675225,
"grad_norm": 3.02775239944458,
"learning_rate": 2.839506172839506e-05,
"loss": 0.7377,
"step": 710
},
{
"epoch": 0.4996530187369882,
"grad_norm": 3.7178709506988525,
"learning_rate": 2.8009259259259263e-05,
"loss": 0.7315,
"step": 720
},
{
"epoch": 0.5065926439972241,
"grad_norm": 2.3943405151367188,
"learning_rate": 2.762345679012346e-05,
"loss": 0.6843,
"step": 730
},
{
"epoch": 0.5135322692574601,
"grad_norm": 3.1974751949310303,
"learning_rate": 2.7237654320987654e-05,
"loss": 0.7292,
"step": 740
},
{
"epoch": 0.520471894517696,
"grad_norm": 2.935885429382324,
"learning_rate": 2.6851851851851855e-05,
"loss": 0.8194,
"step": 750
},
{
"epoch": 0.527411519777932,
"grad_norm": 6.58482027053833,
"learning_rate": 2.6466049382716053e-05,
"loss": 0.6681,
"step": 760
},
{
"epoch": 0.5343511450381679,
"grad_norm": 2.9752790927886963,
"learning_rate": 2.6080246913580247e-05,
"loss": 0.8489,
"step": 770
},
{
"epoch": 0.5412907702984039,
"grad_norm": 3.415062427520752,
"learning_rate": 2.5694444444444445e-05,
"loss": 0.7535,
"step": 780
},
{
"epoch": 0.5482303955586398,
"grad_norm": 2.9602854251861572,
"learning_rate": 2.5308641975308646e-05,
"loss": 0.7386,
"step": 790
},
{
"epoch": 0.5551700208188758,
"grad_norm": 2.3397035598754883,
"learning_rate": 2.492283950617284e-05,
"loss": 0.6889,
"step": 800
},
{
"epoch": 0.5551700208188758,
"eval_loss": 0.7451881766319275,
"eval_runtime": 50.5382,
"eval_samples_per_second": 28.513,
"eval_steps_per_second": 7.143,
"step": 800
},
{
"epoch": 0.5621096460791117,
"grad_norm": 1.889366626739502,
"learning_rate": 2.4537037037037038e-05,
"loss": 0.7376,
"step": 810
},
{
"epoch": 0.5690492713393477,
"grad_norm": 3.3545074462890625,
"learning_rate": 2.4151234567901235e-05,
"loss": 0.7324,
"step": 820
},
{
"epoch": 0.5759888965995836,
"grad_norm": 3.243748426437378,
"learning_rate": 2.3765432098765433e-05,
"loss": 0.7056,
"step": 830
},
{
"epoch": 0.5829285218598196,
"grad_norm": 1.6572705507278442,
"learning_rate": 2.337962962962963e-05,
"loss": 0.7623,
"step": 840
},
{
"epoch": 0.5898681471200555,
"grad_norm": 3.6564319133758545,
"learning_rate": 2.2993827160493828e-05,
"loss": 0.8021,
"step": 850
},
{
"epoch": 0.5968077723802915,
"grad_norm": 2.7752833366394043,
"learning_rate": 2.2608024691358026e-05,
"loss": 0.6935,
"step": 860
},
{
"epoch": 0.6037473976405274,
"grad_norm": 4.253413677215576,
"learning_rate": 2.2222222222222223e-05,
"loss": 0.762,
"step": 870
},
{
"epoch": 0.6106870229007634,
"grad_norm": 2.7186923027038574,
"learning_rate": 2.183641975308642e-05,
"loss": 0.7403,
"step": 880
},
{
"epoch": 0.6176266481609993,
"grad_norm": 2.6007790565490723,
"learning_rate": 2.145061728395062e-05,
"loss": 0.7119,
"step": 890
},
{
"epoch": 0.6245662734212353,
"grad_norm": 3.293909788131714,
"learning_rate": 2.1064814814814816e-05,
"loss": 0.6942,
"step": 900
},
{
"epoch": 0.6245662734212353,
"eval_loss": 0.7264400720596313,
"eval_runtime": 50.4954,
"eval_samples_per_second": 28.537,
"eval_steps_per_second": 7.149,
"step": 900
},
{
"epoch": 0.6315058986814712,
"grad_norm": 3.3802788257598877,
"learning_rate": 2.0679012345679014e-05,
"loss": 0.7141,
"step": 910
},
{
"epoch": 0.6384455239417072,
"grad_norm": 2.965416669845581,
"learning_rate": 2.029320987654321e-05,
"loss": 0.7806,
"step": 920
},
{
"epoch": 0.6453851492019431,
"grad_norm": 3.0134222507476807,
"learning_rate": 1.990740740740741e-05,
"loss": 0.6889,
"step": 930
},
{
"epoch": 0.6523247744621791,
"grad_norm": 3.078749895095825,
"learning_rate": 1.9521604938271607e-05,
"loss": 0.753,
"step": 940
},
{
"epoch": 0.659264399722415,
"grad_norm": 2.0064494609832764,
"learning_rate": 1.91358024691358e-05,
"loss": 0.6586,
"step": 950
},
{
"epoch": 0.666204024982651,
"grad_norm": 3.144228458404541,
"learning_rate": 1.8750000000000002e-05,
"loss": 0.7233,
"step": 960
},
{
"epoch": 0.6731436502428869,
"grad_norm": 3.3552615642547607,
"learning_rate": 1.83641975308642e-05,
"loss": 0.6848,
"step": 970
},
{
"epoch": 0.6800832755031229,
"grad_norm": 2.424555540084839,
"learning_rate": 1.7978395061728397e-05,
"loss": 0.711,
"step": 980
},
{
"epoch": 0.6870229007633588,
"grad_norm": 3.5036256313323975,
"learning_rate": 1.7592592592592595e-05,
"loss": 0.7794,
"step": 990
},
{
"epoch": 0.6939625260235948,
"grad_norm": 3.1521127223968506,
"learning_rate": 1.720679012345679e-05,
"loss": 0.7769,
"step": 1000
},
{
"epoch": 0.6939625260235948,
"eval_loss": 0.705424964427948,
"eval_runtime": 50.3767,
"eval_samples_per_second": 28.604,
"eval_steps_per_second": 7.166,
"step": 1000
},
{
"epoch": 0.7009021512838307,
"grad_norm": 3.177053689956665,
"learning_rate": 1.682098765432099e-05,
"loss": 0.7342,
"step": 1010
},
{
"epoch": 0.7078417765440667,
"grad_norm": 2.742203712463379,
"learning_rate": 1.6435185185185187e-05,
"loss": 0.6258,
"step": 1020
},
{
"epoch": 0.7147814018043026,
"grad_norm": 2.73271107673645,
"learning_rate": 1.604938271604938e-05,
"loss": 0.7548,
"step": 1030
},
{
"epoch": 0.7217210270645386,
"grad_norm": 2.823657751083374,
"learning_rate": 1.5663580246913583e-05,
"loss": 0.7083,
"step": 1040
},
{
"epoch": 0.7286606523247745,
"grad_norm": 2.787195920944214,
"learning_rate": 1.527777777777778e-05,
"loss": 0.6906,
"step": 1050
},
{
"epoch": 0.7356002775850105,
"grad_norm": 3.4323344230651855,
"learning_rate": 1.4891975308641976e-05,
"loss": 0.7352,
"step": 1060
},
{
"epoch": 0.7425399028452464,
"grad_norm": 2.84405779838562,
"learning_rate": 1.4506172839506174e-05,
"loss": 0.7109,
"step": 1070
},
{
"epoch": 0.7494795281054824,
"grad_norm": 2.3355183601379395,
"learning_rate": 1.412037037037037e-05,
"loss": 0.6481,
"step": 1080
},
{
"epoch": 0.7564191533657183,
"grad_norm": 3.446125030517578,
"learning_rate": 1.3734567901234569e-05,
"loss": 0.7126,
"step": 1090
},
{
"epoch": 0.7633587786259542,
"grad_norm": 3.240204334259033,
"learning_rate": 1.3348765432098767e-05,
"loss": 0.6816,
"step": 1100
},
{
"epoch": 0.7633587786259542,
"eval_loss": 0.6829991936683655,
"eval_runtime": 50.4945,
"eval_samples_per_second": 28.538,
"eval_steps_per_second": 7.149,
"step": 1100
},
{
"epoch": 0.7702984038861902,
"grad_norm": 3.2107372283935547,
"learning_rate": 1.2962962962962962e-05,
"loss": 0.6874,
"step": 1110
},
{
"epoch": 0.7772380291464261,
"grad_norm": 3.1716368198394775,
"learning_rate": 1.2577160493827162e-05,
"loss": 0.7069,
"step": 1120
},
{
"epoch": 0.7841776544066621,
"grad_norm": 2.7112536430358887,
"learning_rate": 1.219135802469136e-05,
"loss": 0.6721,
"step": 1130
},
{
"epoch": 0.7911172796668979,
"grad_norm": 3.2708733081817627,
"learning_rate": 1.1805555555555555e-05,
"loss": 0.6656,
"step": 1140
},
{
"epoch": 0.7980569049271339,
"grad_norm": 3.133509874343872,
"learning_rate": 1.1419753086419753e-05,
"loss": 0.6972,
"step": 1150
},
{
"epoch": 0.8049965301873698,
"grad_norm": 2.5478899478912354,
"learning_rate": 1.1033950617283952e-05,
"loss": 0.6708,
"step": 1160
},
{
"epoch": 0.8119361554476058,
"grad_norm": 3.2422120571136475,
"learning_rate": 1.0648148148148148e-05,
"loss": 0.6752,
"step": 1170
},
{
"epoch": 0.8188757807078417,
"grad_norm": 3.1517410278320312,
"learning_rate": 1.0262345679012346e-05,
"loss": 0.6282,
"step": 1180
},
{
"epoch": 0.8258154059680777,
"grad_norm": 2.2257463932037354,
"learning_rate": 9.876543209876543e-06,
"loss": 0.6494,
"step": 1190
},
{
"epoch": 0.8327550312283136,
"grad_norm": 3.7597286701202393,
"learning_rate": 9.490740740740741e-06,
"loss": 0.7014,
"step": 1200
},
{
"epoch": 0.8327550312283136,
"eval_loss": 0.6637962460517883,
"eval_runtime": 50.4735,
"eval_samples_per_second": 28.55,
"eval_steps_per_second": 7.152,
"step": 1200
},
{
"epoch": 0.8396946564885496,
"grad_norm": 2.7648439407348633,
"learning_rate": 9.104938271604939e-06,
"loss": 0.6734,
"step": 1210
},
{
"epoch": 0.8466342817487855,
"grad_norm": 2.5509233474731445,
"learning_rate": 8.719135802469136e-06,
"loss": 0.5945,
"step": 1220
},
{
"epoch": 0.8535739070090215,
"grad_norm": 2.9669878482818604,
"learning_rate": 8.333333333333334e-06,
"loss": 0.6981,
"step": 1230
},
{
"epoch": 0.8605135322692574,
"grad_norm": 2.990398406982422,
"learning_rate": 7.947530864197531e-06,
"loss": 0.6788,
"step": 1240
},
{
"epoch": 0.8674531575294934,
"grad_norm": 3.9718501567840576,
"learning_rate": 7.561728395061729e-06,
"loss": 0.6359,
"step": 1250
},
{
"epoch": 0.8743927827897293,
"grad_norm": 2.4753811359405518,
"learning_rate": 7.1759259259259266e-06,
"loss": 0.6175,
"step": 1260
},
{
"epoch": 0.8813324080499653,
"grad_norm": 2.3380069732666016,
"learning_rate": 6.790123456790123e-06,
"loss": 0.6573,
"step": 1270
},
{
"epoch": 0.8882720333102012,
"grad_norm": 2.903273344039917,
"learning_rate": 6.404320987654322e-06,
"loss": 0.6507,
"step": 1280
},
{
"epoch": 0.8952116585704372,
"grad_norm": 2.6252002716064453,
"learning_rate": 6.0185185185185185e-06,
"loss": 0.7027,
"step": 1290
},
{
"epoch": 0.9021512838306731,
"grad_norm": 2.3435378074645996,
"learning_rate": 5.632716049382716e-06,
"loss": 0.5784,
"step": 1300
},
{
"epoch": 0.9021512838306731,
"eval_loss": 0.6478676199913025,
"eval_runtime": 50.172,
"eval_samples_per_second": 28.721,
"eval_steps_per_second": 7.195,
"step": 1300
},
{
"epoch": 0.9090909090909091,
"grad_norm": 2.5476410388946533,
"learning_rate": 5.246913580246914e-06,
"loss": 0.6038,
"step": 1310
},
{
"epoch": 0.916030534351145,
"grad_norm": 3.113853931427002,
"learning_rate": 4.861111111111111e-06,
"loss": 0.6613,
"step": 1320
},
{
"epoch": 0.922970159611381,
"grad_norm": 1.9195737838745117,
"learning_rate": 4.475308641975309e-06,
"loss": 0.6073,
"step": 1330
},
{
"epoch": 0.9299097848716169,
"grad_norm": 3.406034231185913,
"learning_rate": 4.0895061728395066e-06,
"loss": 0.6398,
"step": 1340
},
{
"epoch": 0.9368494101318529,
"grad_norm": 2.8904383182525635,
"learning_rate": 3.7037037037037037e-06,
"loss": 0.6691,
"step": 1350
},
{
"epoch": 0.9437890353920888,
"grad_norm": 2.453171968460083,
"learning_rate": 3.3179012345679013e-06,
"loss": 0.6236,
"step": 1360
},
{
"epoch": 0.9507286606523248,
"grad_norm": 2.6889538764953613,
"learning_rate": 2.932098765432099e-06,
"loss": 0.6576,
"step": 1370
},
{
"epoch": 0.9576682859125607,
"grad_norm": 2.621765613555908,
"learning_rate": 2.546296296296296e-06,
"loss": 0.6016,
"step": 1380
},
{
"epoch": 0.9646079111727967,
"grad_norm": 2.4071123600006104,
"learning_rate": 2.1604938271604937e-06,
"loss": 0.6861,
"step": 1390
},
{
"epoch": 0.9715475364330326,
"grad_norm": 2.3079018592834473,
"learning_rate": 1.7746913580246913e-06,
"loss": 0.5638,
"step": 1400
},
{
"epoch": 0.9715475364330326,
"eval_loss": 0.6364541053771973,
"eval_runtime": 50.2347,
"eval_samples_per_second": 28.685,
"eval_steps_per_second": 7.186,
"step": 1400
},
{
"epoch": 0.9784871616932685,
"grad_norm": 3.5345683097839355,
"learning_rate": 1.388888888888889e-06,
"loss": 0.6383,
"step": 1410
},
{
"epoch": 0.9854267869535045,
"grad_norm": 2.898383140563965,
"learning_rate": 1.0030864197530864e-06,
"loss": 0.6235,
"step": 1420
},
{
"epoch": 0.9923664122137404,
"grad_norm": 3.0434718132019043,
"learning_rate": 6.17283950617284e-07,
"loss": 0.669,
"step": 1430
},
{
"epoch": 0.9993060374739764,
"grad_norm": 2.3611180782318115,
"learning_rate": 2.3148148148148148e-07,
"loss": 0.5936,
"step": 1440
}
],
"logging_steps": 10,
"max_steps": 1441,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.9964632564865434e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}