{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.8142589118198873,
  "eval_steps": 500,
  "global_step": 1500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01876172607879925,
      "grad_norm": 3.62505841255188,
      "learning_rate": 1.2e-05,
      "loss": 1.0239,
      "step": 10
    },
    {
      "epoch": 0.0375234521575985,
      "grad_norm": 1.075655221939087,
      "learning_rate": 2.5333333333333337e-05,
      "loss": 0.2792,
      "step": 20
    },
    {
      "epoch": 0.05628517823639775,
      "grad_norm": 0.6459623575210571,
      "learning_rate": 3.866666666666667e-05,
      "loss": 0.1393,
      "step": 30
    },
    {
      "epoch": 0.075046904315197,
      "grad_norm": 0.5845428705215454,
      "learning_rate": 5.2000000000000004e-05,
      "loss": 0.1033,
      "step": 40
    },
    {
      "epoch": 0.09380863039399624,
      "grad_norm": 0.5566798448562622,
      "learning_rate": 6.533333333333334e-05,
      "loss": 0.086,
      "step": 50
    },
    {
      "epoch": 0.1125703564727955,
      "grad_norm": 0.5197991132736206,
      "learning_rate": 7.866666666666666e-05,
      "loss": 0.0709,
      "step": 60
    },
    {
      "epoch": 0.13133208255159476,
      "grad_norm": 0.5038473010063171,
      "learning_rate": 9.200000000000001e-05,
      "loss": 0.0693,
      "step": 70
    },
    {
      "epoch": 0.150093808630394,
      "grad_norm": 0.6459262371063232,
      "learning_rate": 9.999805586153205e-05,
      "loss": 0.0631,
      "step": 80
    },
    {
      "epoch": 0.16885553470919323,
      "grad_norm": 0.41942092776298523,
      "learning_rate": 9.997618604001829e-05,
      "loss": 0.0583,
      "step": 90
    },
    {
      "epoch": 0.18761726078799248,
      "grad_norm": 0.5155685544013977,
      "learning_rate": 9.993002688846913e-05,
      "loss": 0.0551,
      "step": 100
    },
    {
      "epoch": 0.20637898686679174,
      "grad_norm": 0.35105767846107483,
      "learning_rate": 9.985960084106682e-05,
      "loss": 0.0477,
      "step": 110
    },
    {
      "epoch": 0.225140712945591,
      "grad_norm": 0.37585997581481934,
      "learning_rate": 9.976494212614377e-05,
      "loss": 0.0469,
      "step": 120
    },
    {
      "epoch": 0.24390243902439024,
      "grad_norm": 0.3624650835990906,
      "learning_rate": 9.964609674954696e-05,
      "loss": 0.0423,
      "step": 130
    },
    {
      "epoch": 0.2626641651031895,
      "grad_norm": 0.41389772295951843,
      "learning_rate": 9.950312247227825e-05,
      "loss": 0.0429,
      "step": 140
    },
    {
      "epoch": 0.28142589118198874,
      "grad_norm": 0.2967846691608429,
      "learning_rate": 9.933608878242153e-05,
      "loss": 0.0432,
      "step": 150
    },
    {
      "epoch": 0.300187617260788,
      "grad_norm": 0.3088073134422302,
      "learning_rate": 9.914507686137019e-05,
      "loss": 0.0392,
      "step": 160
    },
    {
      "epoch": 0.31894934333958724,
      "grad_norm": 0.2572576701641083,
      "learning_rate": 9.893017954437156e-05,
      "loss": 0.0377,
      "step": 170
    },
    {
      "epoch": 0.33771106941838647,
      "grad_norm": 0.40623533725738525,
      "learning_rate": 9.869150127540727e-05,
      "loss": 0.0347,
      "step": 180
    },
    {
      "epoch": 0.35647279549718575,
      "grad_norm": 0.41246405243873596,
      "learning_rate": 9.842915805643155e-05,
      "loss": 0.0349,
      "step": 190
    },
    {
      "epoch": 0.37523452157598497,
      "grad_norm": 0.34366941452026367,
      "learning_rate": 9.81432773909923e-05,
      "loss": 0.0348,
      "step": 200
    },
    {
      "epoch": 0.39399624765478425,
      "grad_norm": 0.41731664538383484,
      "learning_rate": 9.783399822226189e-05,
      "loss": 0.0313,
      "step": 210
    },
    {
      "epoch": 0.41275797373358347,
      "grad_norm": 0.39282381534576416,
      "learning_rate": 9.750147086550844e-05,
      "loss": 0.0361,
      "step": 220
    },
    {
      "epoch": 0.43151969981238275,
      "grad_norm": 0.2867172956466675,
      "learning_rate": 9.714585693503974e-05,
      "loss": 0.0347,
      "step": 230
    },
    {
      "epoch": 0.450281425891182,
      "grad_norm": 0.2782723903656006,
      "learning_rate": 9.676732926565585e-05,
      "loss": 0.0302,
      "step": 240
    },
    {
      "epoch": 0.46904315196998125,
      "grad_norm": 0.34300780296325684,
      "learning_rate": 9.636607182864827e-05,
      "loss": 0.0286,
      "step": 250
    },
    {
      "epoch": 0.4878048780487805,
      "grad_norm": 0.31700846552848816,
      "learning_rate": 9.594227964238653e-05,
      "loss": 0.032,
      "step": 260
    },
    {
      "epoch": 0.5065666041275797,
      "grad_norm": 0.48065921664237976,
      "learning_rate": 9.549615867753573e-05,
      "loss": 0.0303,
      "step": 270
    },
    {
      "epoch": 0.525328330206379,
      "grad_norm": 0.3679749369621277,
      "learning_rate": 9.502792575695112e-05,
      "loss": 0.0313,
      "step": 280
    },
    {
      "epoch": 0.5440900562851783,
      "grad_norm": 0.3818820118904114,
      "learning_rate": 9.453780845029821e-05,
      "loss": 0.0287,
      "step": 290
    },
    {
      "epoch": 0.5628517823639775,
      "grad_norm": 0.2918836176395416,
      "learning_rate": 9.402604496344984e-05,
      "loss": 0.0281,
      "step": 300
    },
    {
      "epoch": 0.5816135084427767,
      "grad_norm": 0.35366421937942505,
      "learning_rate": 9.349288402271388e-05,
      "loss": 0.0286,
      "step": 310
    },
    {
      "epoch": 0.600375234521576,
      "grad_norm": 0.3901827931404114,
      "learning_rate": 9.293858475394754e-05,
      "loss": 0.0286,
      "step": 320
    },
    {
      "epoch": 0.6191369606003753,
      "grad_norm": 0.4397772252559662,
      "learning_rate": 9.236341655661778e-05,
      "loss": 0.0267,
      "step": 330
    },
    {
      "epoch": 0.6378986866791745,
      "grad_norm": 0.3393397033214569,
      "learning_rate": 9.176765897286813e-05,
      "loss": 0.0284,
      "step": 340
    },
    {
      "epoch": 0.6566604127579737,
      "grad_norm": 0.2746170163154602,
      "learning_rate": 9.115160155165614e-05,
      "loss": 0.0251,
      "step": 350
    },
    {
      "epoch": 0.6754221388367729,
      "grad_norm": 0.36951932311058044,
      "learning_rate": 9.05155437080275e-05,
      "loss": 0.0263,
      "step": 360
    },
    {
      "epoch": 0.6941838649155723,
      "grad_norm": 0.32861998677253723,
      "learning_rate": 8.98597945775948e-05,
      "loss": 0.0254,
      "step": 370
    },
    {
      "epoch": 0.7129455909943715,
      "grad_norm": 0.2612588703632355,
      "learning_rate": 8.9184672866292e-05,
      "loss": 0.0224,
      "step": 380
    },
    {
      "epoch": 0.7317073170731707,
      "grad_norm": 0.27696406841278076,
      "learning_rate": 8.849050669547768e-05,
      "loss": 0.0222,
      "step": 390
    },
    {
      "epoch": 0.7504690431519699,
      "grad_norm": 0.32792651653289795,
      "learning_rate": 8.77776334424621e-05,
      "loss": 0.0235,
      "step": 400
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 0.30844274163246155,
      "learning_rate": 8.704639957653567e-05,
      "loss": 0.0235,
      "step": 410
    },
    {
      "epoch": 0.7879924953095685,
      "grad_norm": 0.1702653169631958,
      "learning_rate": 8.629716049057872e-05,
      "loss": 0.0245,
      "step": 420
    },
    {
      "epoch": 0.8067542213883677,
      "grad_norm": 0.2871593236923218,
      "learning_rate": 8.553028032833397e-05,
      "loss": 0.0219,
      "step": 430
    },
    {
      "epoch": 0.8255159474671669,
      "grad_norm": 0.27894458174705505,
      "learning_rate": 8.474613180742628e-05,
      "loss": 0.0187,
      "step": 440
    },
    {
      "epoch": 0.8442776735459663,
      "grad_norm": 0.2544475197792053,
      "learning_rate": 8.394509603821499e-05,
      "loss": 0.0192,
      "step": 450
    },
    {
      "epoch": 0.8630393996247655,
      "grad_norm": 0.22808410227298737,
      "learning_rate": 8.31275623385675e-05,
      "loss": 0.0217,
      "step": 460
    },
    {
      "epoch": 0.8818011257035647,
      "grad_norm": 0.23319919407367706,
      "learning_rate": 8.229392804464362e-05,
      "loss": 0.0194,
      "step": 470
    },
    {
      "epoch": 0.900562851782364,
      "grad_norm": 0.3450873792171478,
      "learning_rate": 8.14445983177832e-05,
      "loss": 0.0219,
      "step": 480
    },
    {
      "epoch": 0.9193245778611632,
      "grad_norm": 0.32911404967308044,
      "learning_rate": 8.057998594759022e-05,
      "loss": 0.0204,
      "step": 490
    },
    {
      "epoch": 0.9380863039399625,
      "grad_norm": 0.31988227367401123,
      "learning_rate": 7.970051115130966e-05,
      "loss": 0.0214,
      "step": 500
    },
    {
      "epoch": 0.9568480300187617,
      "grad_norm": 0.3178263306617737,
      "learning_rate": 7.880660136959428e-05,
      "loss": 0.0187,
      "step": 510
    },
    {
      "epoch": 0.975609756097561,
      "grad_norm": 0.27726197242736816,
      "learning_rate": 7.789869105876083e-05,
      "loss": 0.0176,
      "step": 520
    },
    {
      "epoch": 0.9943714821763602,
      "grad_norm": 0.2947522699832916,
      "learning_rate": 7.697722147963626e-05,
      "loss": 0.0184,
      "step": 530
    },
    {
      "epoch": 1.0131332082551594,
      "grad_norm": 0.3290204405784607,
      "learning_rate": 7.604264048309717e-05,
      "loss": 0.0186,
      "step": 540
    },
    {
      "epoch": 1.0318949343339587,
      "grad_norm": 0.30177435278892517,
      "learning_rate": 7.509540229240601e-05,
      "loss": 0.0186,
      "step": 550
    },
    {
      "epoch": 1.050656660412758,
      "grad_norm": 0.2899879813194275,
      "learning_rate": 7.413596728245054e-05,
      "loss": 0.0186,
      "step": 560
    },
    {
      "epoch": 1.0694183864915572,
      "grad_norm": 0.29170823097229004,
      "learning_rate": 7.316480175599309e-05,
      "loss": 0.019,
      "step": 570
    },
    {
      "epoch": 1.0881801125703565,
      "grad_norm": 0.2978293299674988,
      "learning_rate": 7.218237771703921e-05,
      "loss": 0.0189,
      "step": 580
    },
    {
      "epoch": 1.1069418386491556,
      "grad_norm": 0.2766323685646057,
      "learning_rate": 7.118917264143501e-05,
      "loss": 0.0171,
      "step": 590
    },
    {
      "epoch": 1.125703564727955,
      "grad_norm": 0.2202291339635849,
      "learning_rate": 7.018566924480543e-05,
      "loss": 0.0185,
      "step": 600
    },
    {
      "epoch": 1.1444652908067543,
      "grad_norm": 0.27024173736572266,
      "learning_rate": 6.917235524794558e-05,
      "loss": 0.0176,
      "step": 610
    },
    {
      "epoch": 1.1632270168855534,
      "grad_norm": 0.3646942675113678,
      "learning_rate": 6.814972313977967e-05,
      "loss": 0.018,
      "step": 620
    },
    {
      "epoch": 1.1819887429643527,
      "grad_norm": 0.28385287523269653,
      "learning_rate": 6.711826993800248e-05,
      "loss": 0.0175,
      "step": 630
    },
    {
      "epoch": 1.200750469043152,
      "grad_norm": 0.21348656713962555,
      "learning_rate": 6.607849694751977e-05,
      "loss": 0.0174,
      "step": 640
    },
    {
      "epoch": 1.2195121951219512,
      "grad_norm": 0.23341545462608337,
      "learning_rate": 6.503090951680512e-05,
      "loss": 0.0163,
      "step": 650
    },
    {
      "epoch": 1.2382739212007505,
      "grad_norm": 0.21429835259914398,
      "learning_rate": 6.397601679229126e-05,
      "loss": 0.0173,
      "step": 660
    },
    {
      "epoch": 1.2570356472795496,
      "grad_norm": 0.22369572520256042,
      "learning_rate": 6.291433147091583e-05,
      "loss": 0.0138,
      "step": 670
    },
    {
      "epoch": 1.275797373358349,
      "grad_norm": 0.2960052788257599,
      "learning_rate": 6.184636955094138e-05,
      "loss": 0.0147,
      "step": 680
    },
    {
      "epoch": 1.2945590994371483,
      "grad_norm": 0.2096583992242813,
      "learning_rate": 6.077265008117081e-05,
      "loss": 0.0164,
      "step": 690
    },
    {
      "epoch": 1.3133208255159474,
      "grad_norm": 0.16649533808231354,
      "learning_rate": 5.969369490868042e-05,
      "loss": 0.0154,
      "step": 700
    },
    {
      "epoch": 1.3320825515947468,
      "grad_norm": 0.21355998516082764,
      "learning_rate": 5.861002842519259e-05,
      "loss": 0.0135,
      "step": 710
    },
    {
      "epoch": 1.3508442776735459,
      "grad_norm": 0.203876331448555,
      "learning_rate": 5.75221773122121e-05,
      "loss": 0.0149,
      "step": 720
    },
    {
      "epoch": 1.3696060037523452,
      "grad_norm": 0.20695096254348755,
      "learning_rate": 5.6430670285049314e-05,
      "loss": 0.0154,
      "step": 730
    },
    {
      "epoch": 1.3883677298311445,
      "grad_norm": 0.2057274878025055,
      "learning_rate": 5.533603783585496e-05,
      "loss": 0.0159,
      "step": 740
    },
    {
      "epoch": 1.4071294559099436,
      "grad_norm": 0.20767205953598022,
      "learning_rate": 5.423881197579144e-05,
      "loss": 0.0168,
      "step": 750
    },
    {
      "epoch": 1.425891181988743,
      "grad_norm": 0.16897110641002655,
      "learning_rate": 5.313952597646568e-05,
      "loss": 0.016,
      "step": 760
    },
    {
      "epoch": 1.444652908067542,
      "grad_norm": 0.2598036527633667,
      "learning_rate": 5.203871411074954e-05,
      "loss": 0.015,
      "step": 770
    },
    {
      "epoch": 1.4634146341463414,
      "grad_norm": 0.2077157348394394,
      "learning_rate": 5.093691139311356e-05,
      "loss": 0.0138,
      "step": 780
    },
    {
      "epoch": 1.4821763602251408,
      "grad_norm": 0.24033433198928833,
      "learning_rate": 4.9834653319600246e-05,
      "loss": 0.0135,
      "step": 790
    },
    {
      "epoch": 1.50093808630394,
      "grad_norm": 0.29492080211639404,
      "learning_rate": 4.873247560756324e-05,
      "loss": 0.0129,
      "step": 800
    },
    {
      "epoch": 1.5196998123827392,
      "grad_norm": 0.11821957677602768,
      "learning_rate": 4.7630913935299066e-05,
      "loss": 0.013,
      "step": 810
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 0.14121685922145844,
      "learning_rate": 4.65305036816978e-05,
      "loss": 0.0148,
      "step": 820
    },
    {
      "epoch": 1.5572232645403377,
      "grad_norm": 0.17172347009181976,
      "learning_rate": 4.543177966603925e-05,
      "loss": 0.014,
      "step": 830
    },
    {
      "epoch": 1.575984990619137,
      "grad_norm": 0.17106831073760986,
      "learning_rate": 4.433527588806103e-05,
      "loss": 0.0119,
      "step": 840
    },
    {
      "epoch": 1.5947467166979363,
      "grad_norm": 0.20430134236812592,
      "learning_rate": 4.324152526842517e-05,
      "loss": 0.0145,
      "step": 850
    },
    {
      "epoch": 1.6135084427767354,
      "grad_norm": 0.19569315016269684,
      "learning_rate": 4.215105938970889e-05,
      "loss": 0.0137,
      "step": 860
    },
    {
      "epoch": 1.6322701688555346,
      "grad_norm": 0.16233587265014648,
      "learning_rate": 4.1064408238045994e-05,
      "loss": 0.0134,
      "step": 870
    },
    {
      "epoch": 1.6510318949343339,
      "grad_norm": 0.16989213228225708,
      "learning_rate": 3.9982099945543945e-05,
      "loss": 0.0126,
      "step": 880
    },
    {
      "epoch": 1.6697936210131332,
      "grad_norm": 0.18682365119457245,
      "learning_rate": 3.890466053360211e-05,
      "loss": 0.0115,
      "step": 890
    },
    {
      "epoch": 1.6885553470919326,
      "grad_norm": 0.20883384346961975,
      "learning_rate": 3.783261365725592e-05,
      "loss": 0.0124,
      "step": 900
    },
    {
      "epoch": 1.7073170731707317,
      "grad_norm": 0.21428252756595612,
      "learning_rate": 3.676648035067093e-05,
      "loss": 0.0122,
      "step": 910
    },
    {
      "epoch": 1.726078799249531,
      "grad_norm": 0.20880082249641418,
      "learning_rate": 3.570677877391092e-05,
      "loss": 0.0125,
      "step": 920
    },
    {
      "epoch": 1.7448405253283301,
      "grad_norm": 0.16143251955509186,
      "learning_rate": 3.465402396110269e-05,
      "loss": 0.012,
      "step": 930
    },
    {
      "epoch": 1.7636022514071295,
      "grad_norm": 0.1445058137178421,
      "learning_rate": 3.360872757012011e-05,
      "loss": 0.0123,
      "step": 940
    },
    {
      "epoch": 1.7823639774859288,
      "grad_norm": 0.21280893683433533,
      "learning_rate": 3.257139763390925e-05,
      "loss": 0.0125,
      "step": 950
    },
    {
      "epoch": 1.8011257035647281,
      "grad_norm": 0.16495130956172943,
      "learning_rate": 3.1542538313575035e-05,
      "loss": 0.0116,
      "step": 960
    },
    {
      "epoch": 1.8198874296435272,
      "grad_norm": 0.19219523668289185,
      "learning_rate": 3.052264965335e-05,
      "loss": 0.0133,
      "step": 970
    },
    {
      "epoch": 1.8386491557223263,
      "grad_norm": 0.173895001411438,
      "learning_rate": 2.9512227337563604e-05,
      "loss": 0.0116,
      "step": 980
    },
    {
      "epoch": 1.8574108818011257,
      "grad_norm": 0.16958042979240417,
      "learning_rate": 2.8511762449730795e-05,
      "loss": 0.0125,
      "step": 990
    },
    {
      "epoch": 1.876172607879925,
      "grad_norm": 0.1639089733362198,
      "learning_rate": 2.7521741233876496e-05,
      "loss": 0.0122,
      "step": 1000
    },
    {
      "epoch": 1.8949343339587243,
      "grad_norm": 0.16494053602218628,
      "learning_rate": 2.654264485821214e-05,
      "loss": 0.011,
      "step": 1010
    },
    {
      "epoch": 1.9136960600375235,
      "grad_norm": 0.17180362343788147,
      "learning_rate": 2.55749491812794e-05,
      "loss": 0.0113,
      "step": 1020
    },
    {
      "epoch": 1.9324577861163226,
      "grad_norm": 0.19289036095142365,
      "learning_rate": 2.4619124520674146e-05,
      "loss": 0.0131,
      "step": 1030
    },
    {
      "epoch": 1.951219512195122,
      "grad_norm": 0.1478220820426941,
      "learning_rate": 2.3675635424463754e-05,
      "loss": 0.0097,
      "step": 1040
    },
    {
      "epoch": 1.9699812382739212,
      "grad_norm": 0.18471956253051758,
      "learning_rate": 2.2744940445408202e-05,
      "loss": 0.0118,
      "step": 1050
    },
    {
      "epoch": 1.9887429643527206,
      "grad_norm": 0.16719312965869904,
      "learning_rate": 2.182749191809518e-05,
      "loss": 0.0104,
      "step": 1060
    },
    {
      "epoch": 2.00750469043152,
      "grad_norm": 0.15944059193134308,
      "learning_rate": 2.09237357390974e-05,
      "loss": 0.0109,
      "step": 1070
    },
    {
      "epoch": 2.026266416510319,
      "grad_norm": 0.20150484144687653,
      "learning_rate": 2.0034111150258666e-05,
      "loss": 0.0129,
      "step": 1080
    },
    {
      "epoch": 2.045028142589118,
      "grad_norm": 0.15957467257976532,
      "learning_rate": 1.9159050525214452e-05,
      "loss": 0.011,
      "step": 1090
    },
    {
      "epoch": 2.0637898686679175,
      "grad_norm": 0.133390411734581,
      "learning_rate": 1.8298979159250557e-05,
      "loss": 0.0103,
      "step": 1100
    },
    {
      "epoch": 2.082551594746717,
      "grad_norm": 0.13808146119117737,
      "learning_rate": 1.745431506260173e-05,
      "loss": 0.0115,
      "step": 1110
    },
    {
      "epoch": 2.101313320825516,
      "grad_norm": 0.16296911239624023,
      "learning_rate": 1.662546875729138e-05,
      "loss": 0.0112,
      "step": 1120
    },
    {
      "epoch": 2.120075046904315,
      "grad_norm": 0.13522660732269287,
      "learning_rate": 1.581284307761024e-05,
      "loss": 0.0106,
      "step": 1130
    },
    {
      "epoch": 2.1388367729831144,
      "grad_norm": 0.19323870539665222,
      "learning_rate": 1.5016832974331724e-05,
      "loss": 0.0123,
      "step": 1140
    },
    {
      "epoch": 2.1575984990619137,
      "grad_norm": 0.1635001301765442,
      "learning_rate": 1.4237825322758736e-05,
      "loss": 0.013,
      "step": 1150
    },
    {
      "epoch": 2.176360225140713,
      "grad_norm": 0.17513315379619598,
      "learning_rate": 1.3476198734695122e-05,
      "loss": 0.0104,
      "step": 1160
    },
    {
      "epoch": 2.1951219512195124,
      "grad_norm": 0.10277465730905533,
      "learning_rate": 1.2732323374433707e-05,
      "loss": 0.0105,
      "step": 1170
    },
    {
      "epoch": 2.2138836772983113,
      "grad_norm": 0.1144745722413063,
      "learning_rate": 1.2006560778849578e-05,
      "loss": 0.0103,
      "step": 1180
    },
    {
      "epoch": 2.2326454033771106,
      "grad_norm": 0.1264655739068985,
      "learning_rate": 1.1299263681686706e-05,
      "loss": 0.0119,
      "step": 1190
    },
    {
      "epoch": 2.25140712945591,
      "grad_norm": 0.10604596883058548,
      "learning_rate": 1.0610775842122972e-05,
      "loss": 0.0109,
      "step": 1200
    },
    {
      "epoch": 2.2701688555347093,
      "grad_norm": 0.14508432149887085,
      "learning_rate": 9.941431877696955e-06,
      "loss": 0.0109,
      "step": 1210
    },
    {
      "epoch": 2.2889305816135086,
      "grad_norm": 0.12284737080335617,
      "learning_rate": 9.291557101677784e-06,
      "loss": 0.0093,
      "step": 1220
    },
    {
      "epoch": 2.3076923076923075,
      "grad_norm": 0.1272381991147995,
      "learning_rate": 8.661467364956993e-06,
      "loss": 0.0116,
      "step": 1230
    },
    {
      "epoch": 2.326454033771107,
      "grad_norm": 0.14646074175834656,
      "learning_rate": 8.051468902539272e-06,
      "loss": 0.0102,
      "step": 1240
    },
    {
      "epoch": 2.345215759849906,
      "grad_norm": 0.11431489139795303,
      "learning_rate": 7.461858184706777e-06,
      "loss": 0.0091,
      "step": 1250
    },
    {
      "epoch": 2.3639774859287055,
      "grad_norm": 0.1557149738073349,
      "learning_rate": 6.892921772929112e-06,
      "loss": 0.0097,
      "step": 1260
    },
    {
      "epoch": 2.382739212007505,
      "grad_norm": 0.14029847085475922,
      "learning_rate": 6.344936180589351e-06,
      "loss": 0.0108,
      "step": 1270
    },
    {
      "epoch": 2.401500938086304,
      "grad_norm": 0.11800851672887802,
      "learning_rate": 5.818167738593505e-06,
      "loss": 0.0098,
      "step": 1280
    },
    {
      "epoch": 2.420262664165103,
      "grad_norm": 0.11177805066108704,
      "learning_rate": 5.312872465928881e-06,
      "loss": 0.0093,
      "step": 1290
    },
    {
      "epoch": 2.4390243902439024,
      "grad_norm": 0.12152400612831116,
      "learning_rate": 4.829295945234258e-06,
      "loss": 0.0099,
      "step": 1300
    },
    {
      "epoch": 2.4577861163227017,
      "grad_norm": 0.10167639702558517,
      "learning_rate": 4.367673203442241e-06,
      "loss": 0.0096,
      "step": 1310
    },
    {
      "epoch": 2.476547842401501,
      "grad_norm": 0.13529738783836365,
      "learning_rate": 3.928228597551947e-06,
      "loss": 0.0099,
      "step": 1320
    },
    {
      "epoch": 2.4953095684803,
      "grad_norm": 0.10373085737228394,
      "learning_rate": 3.511175705587433e-06,
      "loss": 0.0088,
      "step": 1330
    },
    {
      "epoch": 2.5140712945590993,
      "grad_norm": 0.13128240406513214,
      "learning_rate": 3.1167172227949347e-06,
      "loss": 0.0113,
      "step": 1340
    },
    {
      "epoch": 2.5328330206378986,
      "grad_norm": 0.11484728753566742,
      "learning_rate": 2.7450448631293036e-06,
      "loss": 0.0088,
      "step": 1350
    },
    {
      "epoch": 2.551594746716698,
      "grad_norm": 0.1032821536064148,
      "learning_rate": 2.3963392660775575e-06,
      "loss": 0.0088,
      "step": 1360
    },
    {
      "epoch": 2.5703564727954973,
      "grad_norm": 0.10594484210014343,
      "learning_rate": 2.0707699088647836e-06,
      "loss": 0.0099,
      "step": 1370
    },
    {
      "epoch": 2.5891181988742966,
      "grad_norm": 0.0838296115398407,
      "learning_rate": 1.7684950240852372e-06,
      "loss": 0.0088,
      "step": 1380
    },
    {
      "epoch": 2.607879924953096,
      "grad_norm": 0.09543336927890778,
      "learning_rate": 1.4896615227983468e-06,
      "loss": 0.0091,
      "step": 1390
    },
    {
      "epoch": 2.626641651031895,
      "grad_norm": 0.1242263913154602,
      "learning_rate": 1.2344049231273302e-06,
      "loss": 0.0092,
      "step": 1400
    },
    {
      "epoch": 2.645403377110694,
      "grad_norm": 0.1851985901594162,
      "learning_rate": 1.00284928439493e-06,
      "loss": 0.0106,
      "step": 1410
    },
    {
      "epoch": 2.6641651031894935,
      "grad_norm": 0.08041252940893173,
      "learning_rate": 7.951071468283167e-07,
      "loss": 0.0098,
      "step": 1420
    },
    {
      "epoch": 2.682926829268293,
      "grad_norm": 0.11750051379203796,
      "learning_rate": 6.11279476862553e-07,
      "loss": 0.0096,
      "step": 1430
    },
    {
      "epoch": 2.7016885553470917,
      "grad_norm": 0.08895982056856155,
      "learning_rate": 4.514556180690188e-07,
      "loss": 0.0093,
      "step": 1440
    },
    {
      "epoch": 2.720450281425891,
      "grad_norm": 0.11088193953037262,
      "learning_rate": 3.157132477328628e-07,
      "loss": 0.0108,
      "step": 1450
    },
    {
      "epoch": 2.7392120075046904,
      "grad_norm": 0.08702103048563004,
      "learning_rate": 2.041183391004453e-07,
      "loss": 0.0083,
      "step": 1460
    },
    {
      "epoch": 2.7579737335834897,
      "grad_norm": 0.10216435045003891,
      "learning_rate": 1.1672512931509283e-07,
      "loss": 0.0098,
      "step": 1470
    },
    {
      "epoch": 2.776735459662289,
      "grad_norm": 0.11117573082447052,
      "learning_rate": 5.3576093056922906e-08,
      "loss": 0.0097,
      "step": 1480
    },
    {
      "epoch": 2.7954971857410884,
      "grad_norm": 0.1127699688076973,
      "learning_rate": 1.4701921899362703e-08,
      "loss": 0.01,
      "step": 1490
    },
    {
      "epoch": 2.8142589118198873,
      "grad_norm": 0.15439969301223755,
      "learning_rate": 1.2150939247002058e-10,
      "loss": 0.0112,
      "step": 1500
    },
    {
      "epoch": 2.8142589118198873,
      "step": 1500,
      "total_flos": 0.0,
      "train_loss": 0.0293558024764061,
      "train_runtime": 1931.3019,
      "train_samples_per_second": 77.668,
      "train_steps_per_second": 0.777
    }
  ],
  "logging_steps": 10,
  "max_steps": 1500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 100,
  "trial_name": null,
  "trial_params": null
}