{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.11309218780371437,
  "eval_steps": 500,
  "global_step": 6400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0008835327172165185,
      "grad_norm": 5.665971279144287,
      "learning_rate": 4.3286219081272084e-07,
      "loss": 1.3738,
      "step": 50
    },
    {
      "epoch": 0.001767065434433037,
      "grad_norm": 5.6161651611328125,
      "learning_rate": 8.745583038869259e-07,
      "loss": 1.1661,
      "step": 100
    },
    {
      "epoch": 0.0026505981516495554,
      "grad_norm": 7.866199970245361,
      "learning_rate": 1.3162544169611309e-06,
      "loss": 1.2107,
      "step": 150
    },
    {
      "epoch": 0.003534130868866074,
      "grad_norm": 5.07379674911499,
      "learning_rate": 1.7579505300353357e-06,
      "loss": 0.9855,
      "step": 200
    },
    {
      "epoch": 0.004417663586082593,
      "grad_norm": 3.2607851028442383,
      "learning_rate": 2.199646643109541e-06,
      "loss": 0.9431,
      "step": 250
    },
    {
      "epoch": 0.005301196303299111,
      "grad_norm": 6.517599105834961,
      "learning_rate": 2.6413427561837457e-06,
      "loss": 0.8566,
      "step": 300
    },
    {
      "epoch": 0.00618472902051563,
      "grad_norm": 2.8523333072662354,
      "learning_rate": 3.0830388692579506e-06,
      "loss": 0.8697,
      "step": 350
    },
    {
      "epoch": 0.007068261737732148,
      "grad_norm": 3.460226058959961,
      "learning_rate": 3.5247349823321555e-06,
      "loss": 0.8099,
      "step": 400
    },
    {
      "epoch": 0.007951794454948667,
      "grad_norm": 3.2528891563415527,
      "learning_rate": 3.966431095406361e-06,
      "loss": 0.766,
      "step": 450
    },
    {
      "epoch": 0.008835327172165185,
      "grad_norm": 4.1086039543151855,
      "learning_rate": 4.408127208480566e-06,
      "loss": 0.7402,
      "step": 500
    },
    {
      "epoch": 0.009718859889381704,
      "grad_norm": 3.8160510063171387,
      "learning_rate": 4.849823321554771e-06,
      "loss": 0.8769,
      "step": 550
    },
    {
      "epoch": 0.010602392606598222,
      "grad_norm": 2.901653289794922,
      "learning_rate": 5.291519434628975e-06,
      "loss": 0.6827,
      "step": 600
    },
    {
      "epoch": 0.011485925323814742,
      "grad_norm": 2.5824739933013916,
      "learning_rate": 5.73321554770318e-06,
      "loss": 0.7252,
      "step": 650
    },
    {
      "epoch": 0.01236945804103126,
      "grad_norm": 2.586138963699341,
      "learning_rate": 6.174911660777385e-06,
      "loss": 0.7701,
      "step": 700
    },
    {
      "epoch": 0.013252990758247778,
      "grad_norm": 2.3450210094451904,
      "learning_rate": 6.6166077738515904e-06,
      "loss": 0.7525,
      "step": 750
    },
    {
      "epoch": 0.014136523475464296,
      "grad_norm": 2.7902042865753174,
      "learning_rate": 7.058303886925795e-06,
      "loss": 0.7097,
      "step": 800
    },
    {
      "epoch": 0.015020056192680814,
      "grad_norm": 3.297929286956787,
      "learning_rate": 7.5e-06,
      "loss": 0.7575,
      "step": 850
    },
    {
      "epoch": 0.015903588909897334,
      "grad_norm": 4.028406143188477,
      "learning_rate": 7.941696113074205e-06,
      "loss": 0.6899,
      "step": 900
    },
    {
      "epoch": 0.016787121627113853,
      "grad_norm": 2.2513041496276855,
      "learning_rate": 8.38339222614841e-06,
      "loss": 0.6655,
      "step": 950
    },
    {
      "epoch": 0.01767065434433037,
      "grad_norm": 2.402355670928955,
      "learning_rate": 8.825088339222614e-06,
      "loss": 0.6601,
      "step": 1000
    },
    {
      "epoch": 0.01855418706154689,
      "grad_norm": 4.492621898651123,
      "learning_rate": 9.26678445229682e-06,
      "loss": 0.6925,
      "step": 1050
    },
    {
      "epoch": 0.019437719778763407,
      "grad_norm": 3.8099517822265625,
      "learning_rate": 9.708480565371025e-06,
      "loss": 0.6169,
      "step": 1100
    },
    {
      "epoch": 0.020321252495979925,
      "grad_norm": 4.58193826675415,
      "learning_rate": 1.0150176678445231e-05,
      "loss": 0.6367,
      "step": 1150
    },
    {
      "epoch": 0.021204785213196443,
      "grad_norm": 4.745123863220215,
      "learning_rate": 1.0591872791519434e-05,
      "loss": 0.615,
      "step": 1200
    },
    {
      "epoch": 0.02208831793041296,
      "grad_norm": 3.260239601135254,
      "learning_rate": 1.103356890459364e-05,
      "loss": 0.6869,
      "step": 1250
    },
    {
      "epoch": 0.022971850647629483,
      "grad_norm": 2.485383987426758,
      "learning_rate": 1.1475265017667845e-05,
      "loss": 0.7527,
      "step": 1300
    },
    {
      "epoch": 0.023855383364846,
      "grad_norm": 2.26680326461792,
      "learning_rate": 1.191696113074205e-05,
      "loss": 0.6124,
      "step": 1350
    },
    {
      "epoch": 0.02473891608206252,
      "grad_norm": 2.348688840866089,
      "learning_rate": 1.2358657243816255e-05,
      "loss": 0.6511,
      "step": 1400
    },
    {
      "epoch": 0.025622448799279038,
      "grad_norm": 2.770859956741333,
      "learning_rate": 1.280035335689046e-05,
      "loss": 0.7047,
      "step": 1450
    },
    {
      "epoch": 0.026505981516495556,
      "grad_norm": 3.188656806945801,
      "learning_rate": 1.3242049469964666e-05,
      "loss": 0.6639,
      "step": 1500
    },
    {
      "epoch": 0.027389514233712074,
      "grad_norm": 2.7158899307250977,
      "learning_rate": 1.368374558303887e-05,
      "loss": 0.6795,
      "step": 1550
    },
    {
      "epoch": 0.028273046950928592,
      "grad_norm": 2.7986080646514893,
      "learning_rate": 1.4125441696113076e-05,
      "loss": 0.6341,
      "step": 1600
    },
    {
      "epoch": 0.02915657966814511,
      "grad_norm": 1.9698214530944824,
      "learning_rate": 1.456713780918728e-05,
      "loss": 0.6031,
      "step": 1650
    },
    {
      "epoch": 0.03004011238536163,
      "grad_norm": 2.495985507965088,
      "learning_rate": 1.5008833922261484e-05,
      "loss": 0.5959,
      "step": 1700
    },
    {
      "epoch": 0.030923645102578147,
      "grad_norm": 2.990360975265503,
      "learning_rate": 1.545053003533569e-05,
      "loss": 0.6412,
      "step": 1750
    },
    {
      "epoch": 0.03180717781979467,
      "grad_norm": 3.658212184906006,
      "learning_rate": 1.5892226148409894e-05,
      "loss": 0.5065,
      "step": 1800
    },
    {
      "epoch": 0.03269071053701118,
      "grad_norm": 2.010875940322876,
      "learning_rate": 1.63339222614841e-05,
      "loss": 0.5611,
      "step": 1850
    },
    {
      "epoch": 0.033574243254227705,
      "grad_norm": 2.408937692642212,
      "learning_rate": 1.6775618374558306e-05,
      "loss": 0.5298,
      "step": 1900
    },
    {
      "epoch": 0.03445777597144422,
      "grad_norm": 2.3144407272338867,
      "learning_rate": 1.721731448763251e-05,
      "loss": 0.5759,
      "step": 1950
    },
    {
      "epoch": 0.03534130868866074,
      "grad_norm": 2.944115400314331,
      "learning_rate": 1.7659010600706715e-05,
      "loss": 0.5782,
      "step": 2000
    },
    {
      "epoch": 0.03622484140587726,
      "grad_norm": 2.3239428997039795,
      "learning_rate": 1.810070671378092e-05,
      "loss": 0.5221,
      "step": 2050
    },
    {
      "epoch": 0.03710837412309378,
      "grad_norm": 4.565939426422119,
      "learning_rate": 1.8542402826855124e-05,
      "loss": 0.5966,
      "step": 2100
    },
    {
      "epoch": 0.0379919068403103,
      "grad_norm": 2.6089091300964355,
      "learning_rate": 1.898409893992933e-05,
      "loss": 0.5989,
      "step": 2150
    },
    {
      "epoch": 0.038875439557526814,
      "grad_norm": 2.4395945072174072,
      "learning_rate": 1.9425795053003533e-05,
      "loss": 0.5097,
      "step": 2200
    },
    {
      "epoch": 0.039758972274743336,
      "grad_norm": 2.274600028991699,
      "learning_rate": 1.986749116607774e-05,
      "loss": 0.4934,
      "step": 2250
    },
    {
      "epoch": 0.04064250499195985,
      "grad_norm": 2.393251895904541,
      "learning_rate": 2.0309187279151945e-05,
      "loss": 0.5354,
      "step": 2300
    },
    {
      "epoch": 0.04152603770917637,
      "grad_norm": 2.613900899887085,
      "learning_rate": 2.075088339222615e-05,
      "loss": 0.5236,
      "step": 2350
    },
    {
      "epoch": 0.04240957042639289,
      "grad_norm": 2.233302116394043,
      "learning_rate": 2.1192579505300354e-05,
      "loss": 0.5057,
      "step": 2400
    },
    {
      "epoch": 0.04329310314360941,
      "grad_norm": 2.2634503841400146,
      "learning_rate": 2.163427561837456e-05,
      "loss": 0.5448,
      "step": 2450
    },
    {
      "epoch": 0.04417663586082592,
      "grad_norm": 1.6744658946990967,
      "learning_rate": 2.2075971731448763e-05,
      "loss": 0.5418,
      "step": 2500
    },
    {
      "epoch": 0.045060168578042445,
      "grad_norm": 2.9320178031921387,
      "learning_rate": 2.2517667844522968e-05,
      "loss": 0.5944,
      "step": 2550
    },
    {
      "epoch": 0.04594370129525897,
      "grad_norm": 2.2643797397613525,
      "learning_rate": 2.2959363957597176e-05,
      "loss": 0.4945,
      "step": 2600
    },
    {
      "epoch": 0.04682723401247548,
      "grad_norm": 2.389902114868164,
      "learning_rate": 2.340106007067138e-05,
      "loss": 0.5225,
      "step": 2650
    },
    {
      "epoch": 0.047710766729692,
      "grad_norm": 2.2676665782928467,
      "learning_rate": 2.3842756183745584e-05,
      "loss": 0.5661,
      "step": 2700
    },
    {
      "epoch": 0.04859429944690852,
      "grad_norm": 2.340926170349121,
      "learning_rate": 2.428445229681979e-05,
      "loss": 0.6125,
      "step": 2750
    },
    {
      "epoch": 0.04947783216412504,
      "grad_norm": 1.925943374633789,
      "learning_rate": 2.4726148409893997e-05,
      "loss": 0.5105,
      "step": 2800
    },
    {
      "epoch": 0.050361364881341554,
      "grad_norm": 3.1281192302703857,
      "learning_rate": 2.5167844522968198e-05,
      "loss": 0.5893,
      "step": 2850
    },
    {
      "epoch": 0.051244897598558076,
      "grad_norm": 2.345649242401123,
      "learning_rate": 2.5609540636042406e-05,
      "loss": 0.545,
      "step": 2900
    },
    {
      "epoch": 0.05212843031577459,
      "grad_norm": 2.9023561477661133,
      "learning_rate": 2.605123674911661e-05,
      "loss": 0.5299,
      "step": 2950
    },
    {
      "epoch": 0.05301196303299111,
      "grad_norm": 2.491269588470459,
      "learning_rate": 2.649293286219081e-05,
      "loss": 0.5186,
      "step": 3000
    },
    {
      "epoch": 0.05389549575020763,
      "grad_norm": 1.842517375946045,
      "learning_rate": 2.693462897526502e-05,
      "loss": 0.5259,
      "step": 3050
    },
    {
      "epoch": 0.05477902846742415,
      "grad_norm": 3.319514274597168,
      "learning_rate": 2.7376325088339223e-05,
      "loss": 0.6663,
      "step": 3100
    },
    {
      "epoch": 0.05566256118464067,
      "grad_norm": 2.7143654823303223,
      "learning_rate": 2.781802120141343e-05,
      "loss": 0.5152,
      "step": 3150
    },
    {
      "epoch": 0.056546093901857185,
      "grad_norm": 2.8187732696533203,
      "learning_rate": 2.8259717314487632e-05,
      "loss": 0.5417,
      "step": 3200
    },
    {
      "epoch": 0.057429626619073706,
      "grad_norm": 2.8348097801208496,
      "learning_rate": 2.870141342756184e-05,
      "loss": 0.5039,
      "step": 3250
    },
    {
      "epoch": 0.05831315933629022,
      "grad_norm": 3.6297833919525146,
      "learning_rate": 2.9143109540636045e-05,
      "loss": 0.4647,
      "step": 3300
    },
    {
      "epoch": 0.05919669205350674,
      "grad_norm": 2.6729063987731934,
      "learning_rate": 2.9584805653710253e-05,
      "loss": 0.4652,
      "step": 3350
    },
    {
      "epoch": 0.06008022477072326,
      "grad_norm": 3.030548572540283,
      "learning_rate": 3.0026501766784454e-05,
      "loss": 0.4914,
      "step": 3400
    },
    {
      "epoch": 0.06096375748793978,
      "grad_norm": 1.844643235206604,
      "learning_rate": 3.0468197879858658e-05,
      "loss": 0.5449,
      "step": 3450
    },
    {
      "epoch": 0.061847290205156294,
      "grad_norm": 1.6973118782043457,
      "learning_rate": 3.090989399293286e-05,
      "loss": 0.5072,
      "step": 3500
    },
    {
      "epoch": 0.06273082292237281,
      "grad_norm": 2.626692295074463,
      "learning_rate": 3.135159010600707e-05,
      "loss": 0.5639,
      "step": 3550
    },
    {
      "epoch": 0.06361435563958934,
      "grad_norm": 2.971773624420166,
      "learning_rate": 3.179328621908128e-05,
      "loss": 0.4729,
      "step": 3600
    },
    {
      "epoch": 0.06449788835680585,
      "grad_norm": 2.134610414505005,
      "learning_rate": 3.2234982332155476e-05,
      "loss": 0.6047,
      "step": 3650
    },
    {
      "epoch": 0.06538142107402237,
      "grad_norm": 1.8596552610397339,
      "learning_rate": 3.267667844522969e-05,
      "loss": 0.5369,
      "step": 3700
    },
    {
      "epoch": 0.0662649537912389,
      "grad_norm": 2.5137698650360107,
      "learning_rate": 3.311837455830389e-05,
      "loss": 0.5014,
      "step": 3750
    },
    {
      "epoch": 0.06714848650845541,
      "grad_norm": 2.8211522102355957,
      "learning_rate": 3.356007067137809e-05,
      "loss": 0.5128,
      "step": 3800
    },
    {
      "epoch": 0.06803201922567192,
      "grad_norm": 2.095426559448242,
      "learning_rate": 3.40017667844523e-05,
      "loss": 0.5345,
      "step": 3850
    },
    {
      "epoch": 0.06891555194288844,
      "grad_norm": 2.1965081691741943,
      "learning_rate": 3.4443462897526505e-05,
      "loss": 0.479,
      "step": 3900
    },
    {
      "epoch": 0.06979908466010497,
      "grad_norm": 2.1722958087921143,
      "learning_rate": 3.488515901060071e-05,
      "loss": 0.5652,
      "step": 3950
    },
    {
      "epoch": 0.07068261737732148,
      "grad_norm": 2.7183449268341064,
      "learning_rate": 3.5326855123674914e-05,
      "loss": 0.5272,
      "step": 4000
    },
    {
      "epoch": 0.071566150094538,
      "grad_norm": 2.356076717376709,
      "learning_rate": 3.576855123674912e-05,
      "loss": 0.4904,
      "step": 4050
    },
    {
      "epoch": 0.07244968281175453,
      "grad_norm": 1.7549006938934326,
      "learning_rate": 3.621024734982332e-05,
      "loss": 0.4755,
      "step": 4100
    },
    {
      "epoch": 0.07333321552897104,
      "grad_norm": 2.0377912521362305,
      "learning_rate": 3.665194346289753e-05,
      "loss": 0.4897,
      "step": 4150
    },
    {
      "epoch": 0.07421674824618756,
      "grad_norm": 2.4711716175079346,
      "learning_rate": 3.709363957597173e-05,
      "loss": 0.4679,
      "step": 4200
    },
    {
      "epoch": 0.07510028096340407,
      "grad_norm": 2.700162649154663,
      "learning_rate": 3.7535335689045936e-05,
      "loss": 0.4712,
      "step": 4250
    },
    {
      "epoch": 0.0759838136806206,
      "grad_norm": 1.9648590087890625,
      "learning_rate": 3.797703180212015e-05,
      "loss": 0.4779,
      "step": 4300
    },
    {
      "epoch": 0.07686734639783711,
      "grad_norm": 2.4238970279693604,
      "learning_rate": 3.8418727915194345e-05,
      "loss": 0.4463,
      "step": 4350
    },
    {
      "epoch": 0.07775087911505363,
      "grad_norm": 1.745356798171997,
      "learning_rate": 3.8860424028268556e-05,
      "loss": 0.4917,
      "step": 4400
    },
    {
      "epoch": 0.07863441183227014,
      "grad_norm": 5.889612197875977,
      "learning_rate": 3.930212014134276e-05,
      "loss": 0.5572,
      "step": 4450
    },
    {
      "epoch": 0.07951794454948667,
      "grad_norm": 2.7529609203338623,
      "learning_rate": 3.9743816254416965e-05,
      "loss": 0.4553,
      "step": 4500
    },
    {
      "epoch": 0.08040147726670319,
      "grad_norm": 2.4175944328308105,
      "learning_rate": 4.018551236749117e-05,
      "loss": 0.4598,
      "step": 4550
    },
    {
      "epoch": 0.0812850099839197,
      "grad_norm": 2.2330217361450195,
      "learning_rate": 4.0627208480565374e-05,
      "loss": 0.5445,
      "step": 4600
    },
    {
      "epoch": 0.08216854270113623,
      "grad_norm": 2.4177329540252686,
      "learning_rate": 4.106890459363958e-05,
      "loss": 0.4537,
      "step": 4650
    },
    {
      "epoch": 0.08305207541835274,
      "grad_norm": 2.6188764572143555,
      "learning_rate": 4.151060070671378e-05,
      "loss": 0.5158,
      "step": 4700
    },
    {
      "epoch": 0.08393560813556926,
      "grad_norm": 3.5044455528259277,
      "learning_rate": 4.195229681978799e-05,
      "loss": 0.4598,
      "step": 4750
    },
    {
      "epoch": 0.08481914085278577,
      "grad_norm": 2.2751505374908447,
      "learning_rate": 4.239399293286219e-05,
      "loss": 0.4662,
      "step": 4800
    },
    {
      "epoch": 0.0857026735700023,
      "grad_norm": 2.0289080142974854,
      "learning_rate": 4.28356890459364e-05,
      "loss": 0.459,
      "step": 4850
    },
    {
      "epoch": 0.08658620628721882,
      "grad_norm": 2.6102516651153564,
      "learning_rate": 4.32773851590106e-05,
      "loss": 0.4275,
      "step": 4900
    },
    {
      "epoch": 0.08746973900443533,
      "grad_norm": 2.5842251777648926,
      "learning_rate": 4.3719081272084805e-05,
      "loss": 0.5575,
      "step": 4950
    },
    {
      "epoch": 0.08835327172165185,
      "grad_norm": 3.6427652835845947,
      "learning_rate": 4.4160777385159016e-05,
      "loss": 0.4197,
      "step": 5000
    },
    {
      "epoch": 0.08923680443886838,
      "grad_norm": 1.8962676525115967,
      "learning_rate": 4.4602473498233214e-05,
      "loss": 0.4525,
      "step": 5050
    },
    {
      "epoch": 0.09012033715608489,
      "grad_norm": 2.1373822689056396,
      "learning_rate": 4.5044169611307425e-05,
      "loss": 0.4469,
      "step": 5100
    },
    {
      "epoch": 0.0910038698733014,
      "grad_norm": 5.542126178741455,
      "learning_rate": 4.548586572438163e-05,
      "loss": 0.5283,
      "step": 5150
    },
    {
      "epoch": 0.09188740259051793,
      "grad_norm": 2.4414310455322266,
      "learning_rate": 4.5927561837455834e-05,
      "loss": 0.4826,
      "step": 5200
    },
    {
      "epoch": 0.09277093530773445,
      "grad_norm": 3.52422833442688,
      "learning_rate": 4.636925795053004e-05,
      "loss": 0.3895,
      "step": 5250
    },
    {
      "epoch": 0.09365446802495096,
      "grad_norm": 2.1975631713867188,
      "learning_rate": 4.681095406360424e-05,
      "loss": 0.4873,
      "step": 5300
    },
    {
      "epoch": 0.09453800074216748,
      "grad_norm": 3.4910616874694824,
      "learning_rate": 4.725265017667845e-05,
      "loss": 0.4895,
      "step": 5350
    },
    {
      "epoch": 0.095421533459384,
      "grad_norm": 2.1225690841674805,
      "learning_rate": 4.769434628975265e-05,
      "loss": 0.4686,
      "step": 5400
    },
    {
      "epoch": 0.09630506617660052,
      "grad_norm": 2.2319257259368896,
      "learning_rate": 4.8136042402826856e-05,
      "loss": 0.4723,
      "step": 5450
    },
    {
      "epoch": 0.09718859889381704,
      "grad_norm": 2.2340879440307617,
      "learning_rate": 4.857773851590106e-05,
      "loss": 0.5258,
      "step": 5500
    },
    {
      "epoch": 0.09807213161103355,
      "grad_norm": 3.2808139324188232,
      "learning_rate": 4.901943462897527e-05,
      "loss": 0.4851,
      "step": 5550
    },
    {
      "epoch": 0.09895566432825008,
      "grad_norm": 2.4828484058380127,
      "learning_rate": 4.946113074204947e-05,
      "loss": 0.5311,
      "step": 5600
    },
    {
      "epoch": 0.0998391970454666,
      "grad_norm": 1.7307246923446655,
      "learning_rate": 4.990282685512368e-05,
      "loss": 0.411,
      "step": 5650
    },
    {
      "epoch": 0.10072272976268311,
      "grad_norm": 1.9073278903961182,
      "learning_rate": 4.996171290569595e-05,
      "loss": 0.4184,
      "step": 5700
    },
    {
      "epoch": 0.10160626247989964,
      "grad_norm": 1.8571208715438843,
      "learning_rate": 4.9912626887357406e-05,
      "loss": 0.4071,
      "step": 5750
    },
    {
      "epoch": 0.10248979519711615,
      "grad_norm": 1.7524621486663818,
      "learning_rate": 4.986354086901887e-05,
      "loss": 0.4712,
      "step": 5800
    },
    {
      "epoch": 0.10337332791433267,
      "grad_norm": 4.2943434715271,
      "learning_rate": 4.9814454850680335e-05,
      "loss": 0.4912,
      "step": 5850
    },
    {
      "epoch": 0.10425686063154918,
      "grad_norm": 2.398043632507324,
      "learning_rate": 4.97653688323418e-05,
      "loss": 0.5589,
      "step": 5900
    },
    {
      "epoch": 0.10514039334876571,
      "grad_norm": 1.9587973356246948,
      "learning_rate": 4.9716282814003265e-05,
      "loss": 0.4507,
      "step": 5950
    },
    {
      "epoch": 0.10602392606598222,
      "grad_norm": 2.0629475116729736,
      "learning_rate": 4.966719679566473e-05,
      "loss": 0.5429,
      "step": 6000
    },
    {
      "epoch": 0.10690745878319874,
      "grad_norm": 1.6127039194107056,
      "learning_rate": 4.961811077732619e-05,
      "loss": 0.3789,
      "step": 6050
    },
    {
      "epoch": 0.10779099150041525,
      "grad_norm": 2.230015993118286,
      "learning_rate": 4.956902475898765e-05,
      "loss": 0.3949,
      "step": 6100
    },
    {
      "epoch": 0.10867452421763178,
      "grad_norm": 1.9963310956954956,
      "learning_rate": 4.9519938740649116e-05,
      "loss": 0.4491,
      "step": 6150
    },
    {
      "epoch": 0.1095580569348483,
      "grad_norm": 2.2731542587280273,
      "learning_rate": 4.947085272231058e-05,
      "loss": 0.435,
      "step": 6200
    },
    {
      "epoch": 0.11044158965206481,
      "grad_norm": 2.447551727294922,
      "learning_rate": 4.9421766703972046e-05,
      "loss": 0.3865,
      "step": 6250
    },
    {
      "epoch": 0.11132512236928134,
      "grad_norm": 2.126950740814209,
      "learning_rate": 4.9372680685633504e-05,
      "loss": 0.4175,
      "step": 6300
    },
    {
      "epoch": 0.11220865508649785,
      "grad_norm": 2.22995924949646,
      "learning_rate": 4.932359466729497e-05,
      "loss": 0.4387,
      "step": 6350
    },
    {
      "epoch": 0.11309218780371437,
      "grad_norm": 1.5801736116409302,
      "learning_rate": 4.927450864895643e-05,
      "loss": 0.4554,
      "step": 6400
    }
  ],
  "logging_steps": 50,
  "max_steps": 56591,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}