{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 650,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 0.22638954581615983,
      "learning_rate": 3.0769230769230774e-06,
      "loss": 0.8364,
      "step": 1
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.1813039257679487,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 0.8034,
      "step": 5
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.17727989080166365,
      "learning_rate": 3.0769230769230774e-05,
      "loss": 0.7942,
      "step": 10
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.16805037708786894,
      "learning_rate": 4.615384615384616e-05,
      "loss": 0.8146,
      "step": 15
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.21617629507877206,
      "learning_rate": 6.153846153846155e-05,
      "loss": 0.7828,
      "step": 20
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.20939368966196042,
      "learning_rate": 7.692307692307693e-05,
      "loss": 0.7701,
      "step": 25
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.1980708158646925,
      "learning_rate": 9.230769230769232e-05,
      "loss": 0.8039,
      "step": 30
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.1727912158397291,
      "learning_rate": 0.0001076923076923077,
      "loss": 0.7828,
      "step": 35
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.18556767932502666,
      "learning_rate": 0.0001230769230769231,
      "loss": 0.7467,
      "step": 40
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.2557061081364788,
      "learning_rate": 0.00013846153846153847,
      "loss": 0.7449,
      "step": 45
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.19745764432673993,
      "learning_rate": 0.00015384615384615385,
      "loss": 0.7572,
      "step": 50
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.20505129132890218,
      "learning_rate": 0.00016923076923076923,
      "loss": 0.7751,
      "step": 55
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.20649448170626705,
      "learning_rate": 0.00018461538461538463,
      "loss": 0.7602,
      "step": 60
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.18752216144043785,
      "learning_rate": 0.0002,
      "loss": 0.8063,
      "step": 65
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.22508963348939887,
      "learning_rate": 0.00019996395276708856,
      "loss": 0.7611,
      "step": 70
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.18833225533556988,
      "learning_rate": 0.00019985583705641418,
      "loss": 0.7542,
      "step": 75
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.18925044414460548,
      "learning_rate": 0.00019967573081342103,
      "loss": 0.7973,
      "step": 80
    },
    {
      "epoch": 0.13,
      "grad_norm": 0.19417928462178893,
      "learning_rate": 0.0001994237638847428,
      "loss": 0.7084,
      "step": 85
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.20495423399654503,
      "learning_rate": 0.00019910011792459087,
      "loss": 0.7428,
      "step": 90
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.19400035198889168,
      "learning_rate": 0.00019870502626379127,
      "loss": 0.7826,
      "step": 95
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.18597079746230874,
      "learning_rate": 0.00019823877374156647,
      "loss": 0.7836,
      "step": 100
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.18679845602710174,
      "learning_rate": 0.00019770169650018172,
      "loss": 0.765,
      "step": 105
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.18479380410859886,
      "learning_rate": 0.0001970941817426052,
      "loss": 0.7534,
      "step": 110
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.18138845502090514,
      "learning_rate": 0.00019641666745335624,
      "loss": 0.7577,
      "step": 115
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.17448186193560006,
      "learning_rate": 0.00019566964208274254,
      "loss": 0.7799,
      "step": 120
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.19208143444574954,
      "learning_rate": 0.00019485364419471454,
      "loss": 0.7689,
      "step": 125
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.1622511683293268,
      "learning_rate": 0.00019396926207859084,
      "loss": 0.7375,
      "step": 130
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.18313460619011496,
      "learning_rate": 0.00019301713332493386,
      "loss": 0.7385,
      "step": 135
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.17951515004814617,
      "learning_rate": 0.00019199794436588243,
      "loss": 0.7399,
      "step": 140
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.17473307701756932,
      "learning_rate": 0.0001909124299802724,
      "loss": 0.7594,
      "step": 145
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.16310885248098056,
      "learning_rate": 0.0001897613727639014,
      "loss": 0.7178,
      "step": 150
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.1895983902895304,
      "learning_rate": 0.000188545602565321,
      "loss": 0.7259,
      "step": 155
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.17011783213385412,
      "learning_rate": 0.00018726599588756145,
      "loss": 0.7663,
      "step": 160
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.19740064630501092,
      "learning_rate": 0.0001859234752562217,
      "loss": 0.7835,
      "step": 165
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.176460816673931,
      "learning_rate": 0.0001845190085543795,
      "loss": 0.7742,
      "step": 170
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.1776861592924501,
      "learning_rate": 0.00018305360832480117,
      "loss": 0.7483,
      "step": 175
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.18361359295861726,
      "learning_rate": 0.00018152833103995443,
      "loss": 0.767,
      "step": 180
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.17604664269508136,
      "learning_rate": 0.00017994427634035015,
      "loss": 0.76,
      "step": 185
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.18343023724564714,
      "learning_rate": 0.00017830258624176225,
      "loss": 0.7665,
      "step": 190
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.17483015598221394,
      "learning_rate": 0.0001766044443118978,
      "loss": 0.7239,
      "step": 195
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.1778755737742503,
      "learning_rate": 0.00017485107481711012,
      "loss": 0.7617,
      "step": 200
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.17763024228280486,
      "learning_rate": 0.00017304374183977033,
      "loss": 0.7664,
      "step": 205
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.17405205010652272,
      "learning_rate": 0.00017118374836693406,
      "loss": 0.7568,
      "step": 210
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.20068956314633762,
      "learning_rate": 0.00016927243535095997,
      "loss": 0.7614,
      "step": 215
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.17815075790056695,
      "learning_rate": 0.00016731118074275704,
      "loss": 0.7445,
      "step": 220
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.1732484639432504,
      "learning_rate": 0.0001653013984983585,
      "loss": 0.7413,
      "step": 225
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.16182846941484944,
      "learning_rate": 0.00016324453755953773,
      "loss": 0.7411,
      "step": 230
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.18502931691402047,
      "learning_rate": 0.00016114208080920123,
      "loss": 0.7567,
      "step": 235
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.19284513271580453,
      "learning_rate": 0.00015899554400231232,
      "loss": 0.7611,
      "step": 240
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.18204918517543528,
      "learning_rate": 0.00015680647467311557,
      "loss": 0.7637,
      "step": 245
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.18152147568265412,
      "learning_rate": 0.00015457645101945046,
      "loss": 0.7613,
      "step": 250
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.17373412443807332,
      "learning_rate": 0.00015230708076495775,
      "loss": 0.734,
      "step": 255
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.20935352135186272,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.7503,
      "step": 260
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.19127094935614322,
      "learning_rate": 0.0001476568720021308,
      "loss": 0.7649,
      "step": 265
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.18459794613971947,
      "learning_rate": 0.00014527938603696376,
      "loss": 0.7582,
      "step": 270
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.16739519013438775,
      "learning_rate": 0.00014286925614030542,
      "loss": 0.7028,
      "step": 275
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.17618630132316776,
      "learning_rate": 0.0001404282198824305,
      "loss": 0.7759,
      "step": 280
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.1742265666097768,
      "learning_rate": 0.00013795803711538966,
      "loss": 0.7499,
      "step": 285
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.16927687855060541,
      "learning_rate": 0.00013546048870425356,
      "loss": 0.7785,
      "step": 290
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.16526453726289647,
      "learning_rate": 0.00013293737524320797,
      "loss": 0.707,
      "step": 295
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.16424167922354121,
      "learning_rate": 0.0001303905157574247,
      "loss": 0.7736,
      "step": 300
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.1689166927092904,
      "learning_rate": 0.0001278217463916453,
      "loss": 0.7376,
      "step": 305
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.17538502991890237,
      "learning_rate": 0.00012523291908642217,
      "loss": 0.7561,
      "step": 310
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.16834661737082066,
      "learning_rate": 0.00012262590024297225,
      "loss": 0.7879,
      "step": 315
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.18075535513747248,
      "learning_rate": 0.00012000256937760445,
      "loss": 0.76,
      "step": 320
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.18435217046485927,
      "learning_rate": 0.00011736481776669306,
      "loss": 0.7634,
      "step": 325
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.19534270424648292,
      "learning_rate": 0.00011471454708317162,
      "loss": 0.7289,
      "step": 330
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.16179391514607225,
      "learning_rate": 0.0001120536680255323,
      "loss": 0.7757,
      "step": 335
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.16984696045342035,
      "learning_rate": 0.00010938409894031794,
      "loss": 0.7728,
      "step": 340
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.17164253966708626,
      "learning_rate": 0.00010670776443910024,
      "loss": 0.7567,
      "step": 345
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.1762947619639153,
      "learning_rate": 0.00010402659401094152,
      "loss": 0.7622,
      "step": 350
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.1643638219047145,
      "learning_rate": 0.00010134252063133975,
      "loss": 0.7445,
      "step": 355
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.18083244859851283,
      "learning_rate": 9.865747936866027e-05,
      "loss": 0.7267,
      "step": 360
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.17258431530932208,
      "learning_rate": 9.597340598905852e-05,
      "loss": 0.7365,
      "step": 365
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.16200859086797756,
      "learning_rate": 9.329223556089975e-05,
      "loss": 0.7387,
      "step": 370
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.16774671238035113,
      "learning_rate": 9.061590105968208e-05,
      "loss": 0.7318,
      "step": 375
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.1692828345651934,
      "learning_rate": 8.79463319744677e-05,
      "loss": 0.7896,
      "step": 380
    },
    {
      "epoch": 0.59,
      "grad_norm": 0.15934964070723345,
      "learning_rate": 8.528545291682838e-05,
      "loss": 0.6854,
      "step": 385
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.1550634713902335,
      "learning_rate": 8.263518223330697e-05,
      "loss": 0.7837,
      "step": 390
    },
    {
      "epoch": 0.61,
      "grad_norm": 0.1607860378586748,
      "learning_rate": 7.999743062239557e-05,
      "loss": 0.7537,
      "step": 395
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.16982363325460645,
      "learning_rate": 7.73740997570278e-05,
      "loss": 0.7476,
      "step": 400
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.1787899661184355,
      "learning_rate": 7.476708091357782e-05,
      "loss": 0.7292,
      "step": 405
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.17032167101003257,
      "learning_rate": 7.217825360835473e-05,
      "loss": 0.7481,
      "step": 410
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.1808269404462512,
      "learning_rate": 6.960948424257532e-05,
      "loss": 0.7367,
      "step": 415
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.17901906299647485,
      "learning_rate": 6.706262475679205e-05,
      "loss": 0.7605,
      "step": 420
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.18834056302806162,
      "learning_rate": 6.453951129574644e-05,
      "loss": 0.788,
      "step": 425
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.18253089327197253,
      "learning_rate": 6.204196288461037e-05,
      "loss": 0.7485,
      "step": 430
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.1896961988867079,
      "learning_rate": 5.957178011756952e-05,
      "loss": 0.7462,
      "step": 435
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.18760202699871834,
      "learning_rate": 5.713074385969457e-05,
      "loss": 0.756,
      "step": 440
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.1773529126283878,
      "learning_rate": 5.472061396303629e-05,
      "loss": 0.7426,
      "step": 445
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.1669653708836188,
      "learning_rate": 5.234312799786921e-05,
      "loss": 0.7214,
      "step": 450
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.16724226423303531,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.7554,
      "step": 455
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.1605488611045014,
      "learning_rate": 4.7692919235042255e-05,
      "loss": 0.7395,
      "step": 460
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.1724230273686043,
      "learning_rate": 4.542354898054953e-05,
      "loss": 0.6914,
      "step": 465
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.18876308185046786,
      "learning_rate": 4.3193525326884435e-05,
      "loss": 0.774,
      "step": 470
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.15832807229666745,
      "learning_rate": 4.100445599768774e-05,
      "loss": 0.7285,
      "step": 475
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.20134778903646663,
      "learning_rate": 3.885791919079878e-05,
      "loss": 0.7513,
      "step": 480
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.17746064473235634,
      "learning_rate": 3.675546244046228e-05,
      "loss": 0.7175,
      "step": 485
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.17200683319654153,
      "learning_rate": 3.469860150164152e-05,
      "loss": 0.7321,
      "step": 490
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.15989134079339526,
      "learning_rate": 3.268881925724297e-05,
      "loss": 0.7379,
      "step": 495
    },
    {
      "epoch": 0.77,
      "grad_norm": 0.15850580729842006,
      "learning_rate": 3.072756464904006e-05,
      "loss": 0.7376,
      "step": 500
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.18738024330457914,
      "learning_rate": 2.881625163306596e-05,
      "loss": 0.7471,
      "step": 505
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.1666154545377965,
      "learning_rate": 2.6956258160229695e-05,
      "loss": 0.7281,
      "step": 510
    },
    {
      "epoch": 0.79,
      "grad_norm": 0.1876771320924796,
      "learning_rate": 2.514892518288988e-05,
      "loss": 0.7482,
      "step": 515
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.16905049400258154,
      "learning_rate": 2.339555568810221e-05,
      "loss": 0.749,
      "step": 520
    },
    {
      "epoch": 0.81,
      "grad_norm": 0.18106716736760403,
      "learning_rate": 2.1697413758237784e-05,
      "loss": 0.7601,
      "step": 525
    },
    {
      "epoch": 0.82,
      "grad_norm": 0.1616487448106075,
      "learning_rate": 2.0055723659649904e-05,
      "loss": 0.7509,
      "step": 530
    },
    {
      "epoch": 0.82,
      "grad_norm": 0.16890169036482472,
      "learning_rate": 1.8471668960045574e-05,
      "loss": 0.7066,
      "step": 535
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.1632645450151788,
      "learning_rate": 1.6946391675198836e-05,
      "loss": 0.7396,
      "step": 540
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.18148424988604053,
      "learning_rate": 1.5480991445620542e-05,
      "loss": 0.7602,
      "step": 545
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.16966068643276655,
      "learning_rate": 1.4076524743778319e-05,
      "loss": 0.7002,
      "step": 550
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.16674840971276483,
      "learning_rate": 1.2734004112438568e-05,
      "loss": 0.7596,
      "step": 555
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.16780234279195239,
      "learning_rate": 1.1454397434679021e-05,
      "loss": 0.7404,
      "step": 560
    },
    {
      "epoch": 0.87,
      "grad_norm": 0.1688456301085619,
      "learning_rate": 1.0238627236098619e-05,
      "loss": 0.742,
      "step": 565
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.16354206297853066,
      "learning_rate": 9.08757001972762e-06,
      "loss": 0.7227,
      "step": 570
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.17015103460312153,
      "learning_rate": 8.002055634117578e-06,
      "loss": 0.7265,
      "step": 575
    },
    {
      "epoch": 0.89,
      "grad_norm": 0.17411041321482185,
      "learning_rate": 6.9828666750661795e-06,
      "loss": 0.7408,
      "step": 580
    },
    {
      "epoch": 0.9,
      "grad_norm": 0.15987450052661933,
      "learning_rate": 6.030737921409169e-06,
      "loss": 0.7541,
      "step": 585
    },
    {
      "epoch": 0.91,
      "grad_norm": 0.16461391402486866,
      "learning_rate": 5.146355805285452e-06,
      "loss": 0.7288,
      "step": 590
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.17493762678626293,
      "learning_rate": 4.3303579172574885e-06,
      "loss": 0.7451,
      "step": 595
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.16247847988130878,
      "learning_rate": 3.5833325466437694e-06,
      "loss": 0.7299,
      "step": 600
    },
    {
      "epoch": 0.93,
      "grad_norm": 0.1643216040963927,
      "learning_rate": 2.905818257394799e-06,
      "loss": 0.7473,
      "step": 605
    },
    {
      "epoch": 0.94,
      "grad_norm": 0.16603719389852187,
      "learning_rate": 2.2983034998182997e-06,
      "loss": 0.7345,
      "step": 610
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.1905014253547241,
      "learning_rate": 1.7612262584335237e-06,
      "loss": 0.7193,
      "step": 615
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.17400163902350113,
      "learning_rate": 1.2949737362087156e-06,
      "loss": 0.7942,
      "step": 620
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.17549556714092185,
      "learning_rate": 8.998820754091531e-07,
      "loss": 0.7735,
      "step": 625
    },
    {
      "epoch": 0.97,
      "grad_norm": 0.17592545054253697,
      "learning_rate": 5.762361152572115e-07,
      "loss": 0.7364,
      "step": 630
    },
    {
      "epoch": 0.98,
      "grad_norm": 0.18023784894742628,
      "learning_rate": 3.2426918657900704e-07,
      "loss": 0.7338,
      "step": 635
    },
    {
      "epoch": 0.98,
      "grad_norm": 0.17195796913696132,
      "learning_rate": 1.4416294358582384e-07,
      "loss": 0.7572,
      "step": 640
    },
    {
      "epoch": 0.99,
      "grad_norm": 0.17111312234723997,
      "learning_rate": 3.60472329114625e-08,
      "loss": 0.7365,
      "step": 645
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.18043206582301746,
      "learning_rate": 0.0,
      "loss": 0.7291,
      "step": 650
    },
    {
      "epoch": 1.0,
      "eval_loss": NaN,
      "eval_runtime": 2999.5091,
      "eval_samples_per_second": 1.541,
      "eval_steps_per_second": 0.385,
      "step": 650
    },
    {
      "epoch": 1.0,
      "step": 650,
      "total_flos": 7820976031531008.0,
      "train_loss": 0.7519041672119727,
      "train_runtime": 26444.6436,
      "train_samples_per_second": 1.572,
      "train_steps_per_second": 0.025
    }
  ],
  "logging_steps": 5,
  "max_steps": 650,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 7820976031531008.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}