{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 325,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 6.116353883934073,
      "learning_rate": 6.060606060606061e-06,
      "loss": 10.9077,
      "step": 1
    },
    {
      "epoch": 0.02,
      "grad_norm": 5.276408725156457,
      "learning_rate": 3.0303030303030306e-05,
      "loss": 10.8028,
      "step": 5
    },
    {
      "epoch": 0.03,
      "grad_norm": 7.553162631560424,
      "learning_rate": 6.060606060606061e-05,
      "loss": 10.1528,
      "step": 10
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.4291891781746429,
      "learning_rate": 9.090909090909092e-05,
      "loss": 8.605,
      "step": 15
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.10898194818740227,
      "learning_rate": 0.00012121212121212122,
      "loss": 8.4331,
      "step": 20
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.03189661005888827,
      "learning_rate": 0.00015151515151515152,
      "loss": 8.4675,
      "step": 25
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.02334602250201191,
      "learning_rate": 0.00018181818181818183,
      "loss": 8.472,
      "step": 30
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.01196690593776891,
      "learning_rate": 0.00019997685019798912,
      "loss": 8.4563,
      "step": 35
    },
    {
      "epoch": 0.12,
      "grad_norm": 3.2907094840511184,
      "learning_rate": 0.0001997165380022878,
      "loss": 8.4385,
      "step": 40
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.0025910382161876,
      "learning_rate": 0.000199167731989929,
      "loss": 8.4094,
      "step": 45
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.0042103789632999345,
      "learning_rate": 0.0001983320199330545,
      "loss": 8.3828,
      "step": 50
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.002849586529470528,
      "learning_rate": 0.00019721181966290613,
      "loss": 8.3641,
      "step": 55
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.0011645283550178753,
      "learning_rate": 0.00019581037207470382,
      "loss": 8.3578,
      "step": 60
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.000614824705284003,
      "learning_rate": 0.00019413173175128473,
      "loss": 8.3426,
      "step": 65
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.0006912900930412507,
      "learning_rate": 0.00019218075523263104,
      "loss": 8.3484,
      "step": 70
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.005367571192250064,
      "learning_rate": 0.00018996308696522433,
      "loss": 8.4161,
      "step": 75
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.0606263367497006,
      "learning_rate": 0.00018748514297187648,
      "loss": 8.2675,
      "step": 80
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.01793815434279694,
      "learning_rate": 0.00018475409228928312,
      "loss": 8.2659,
      "step": 85
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.008717132473841635,
      "learning_rate": 0.00018177783622700327,
      "loss": 8.2502,
      "step": 90
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.0047497997520662555,
      "learning_rate": 0.00017856498550787144,
      "loss": 8.2204,
      "step": 95
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.0025777265518574194,
      "learning_rate": 0.00017512483535597867,
      "loss": 8.1985,
      "step": 100
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.0018423967258739478,
      "learning_rate": 0.00017146733860429612,
      "loss": 8.1782,
      "step": 105
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.0013575215364357142,
      "learning_rate": 0.0001676030768997445,
      "loss": 8.1844,
      "step": 110
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.0009592164974413616,
      "learning_rate": 0.00016354323008901776,
      "loss": 8.1625,
      "step": 115
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.0008181054725389062,
      "learning_rate": 0.00015929954387373103,
      "loss": 8.1688,
      "step": 120
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.0009746727182352604,
      "learning_rate": 0.00015488429582847192,
      "loss": 8.194,
      "step": 125
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.0015308543324832951,
      "learning_rate": 0.00015031025988006936,
      "loss": 8.1547,
      "step": 130
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.0017756159985463486,
      "learning_rate": 0.00014559066935084588,
      "loss": 8.1391,
      "step": 135
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.0015224483018365321,
      "learning_rate": 0.00014073917867277557,
      "loss": 8.1406,
      "step": 140
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.0015693594443860022,
      "learning_rate": 0.0001357698238833126,
      "loss": 8.1266,
      "step": 145
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.0010677737954625072,
      "learning_rate": 0.000130696982017182,
      "loss": 8.125,
      "step": 150
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.0007802127959643028,
      "learning_rate": 0.0001255353295116187,
      "loss": 8.1391,
      "step": 155
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.0006989059044981654,
      "learning_rate": 0.00012029979974539234,
      "loss": 8.1328,
      "step": 160
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.0009119182874070918,
      "learning_rate": 0.00011500553983446527,
      "loss": 8.1359,
      "step": 165
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.0005539230642084518,
      "learning_rate": 0.00010966786680927874,
      "loss": 8.1313,
      "step": 170
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.0005005591061251601,
      "learning_rate": 0.00010430222330045304,
      "loss": 8.1422,
      "step": 175
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.0005132965742146893,
      "learning_rate": 9.892413286110886e-05,
      "loss": 8.1822,
      "step": 180
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.0014694386938598014,
      "learning_rate": 9.354915505506839e-05,
      "loss": 8.1359,
      "step": 185
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.0024110842942386573,
      "learning_rate": 8.81928404408726e-05,
      "loss": 8.1313,
      "step": 190
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.0023927225333756773,
      "learning_rate": 8.287068558185225e-05,
      "loss": 8.1328,
      "step": 195
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.032000474157054606,
      "learning_rate": 7.759808821241406e-05,
      "loss": 8.1577,
      "step": 200
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.014912360829388252,
      "learning_rate": 7.239030269025311e-05,
      "loss": 8.1126,
      "step": 205
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.006980691127090787,
      "learning_rate": 6.726239586337408e-05,
      "loss": 8.111,
      "step": 210
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.0027905033268489166,
      "learning_rate": 6.22292034796035e-05,
      "loss": 8.1094,
      "step": 215
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.0017283607289640386,
      "learning_rate": 5.730528726470792e-05,
      "loss": 8.1047,
      "step": 220
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.002264196853860243,
      "learning_rate": 5.2504892793295e-05,
      "loss": 8.1047,
      "step": 225
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.0012025000857218745,
      "learning_rate": 4.7841908274384616e-05,
      "loss": 8.1156,
      "step": 230
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.0008764729176935806,
      "learning_rate": 4.332982437088825e-05,
      "loss": 8.1047,
      "step": 235
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.0011503436093413008,
      "learning_rate": 3.898169516924398e-05,
      "loss": 8.1109,
      "step": 240
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.0007409622754668202,
      "learning_rate": 3.4810100412128747e-05,
      "loss": 8.0875,
      "step": 245
    },
    {
      "epoch": 0.77,
      "grad_norm": 0.0006627805954961152,
      "learning_rate": 3.0827109103512643e-05,
      "loss": 8.1016,
      "step": 250
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.0008662071988656678,
      "learning_rate": 2.7044244591351232e-05,
      "loss": 8.1047,
      "step": 255
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.0005382928432685694,
      "learning_rate": 2.3472451228937253e-05,
      "loss": 8.1,
      "step": 260
    },
    {
      "epoch": 0.82,
      "grad_norm": 0.0005771465089242338,
      "learning_rate": 2.0122062711363532e-05,
      "loss": 8.0938,
      "step": 265
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.0007199434560267525,
      "learning_rate": 1.7002772178705716e-05,
      "loss": 8.1109,
      "step": 270
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.000623592568507816,
      "learning_rate": 1.4123604172419713e-05,
      "loss": 8.0984,
      "step": 275
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.0007635159116452594,
      "learning_rate": 1.149288852608743e-05,
      "loss": 8.0984,
      "step": 280
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.0005875388330864554,
      "learning_rate": 9.118236266049707e-06,
      "loss": 8.1047,
      "step": 285
    },
    {
      "epoch": 0.89,
      "grad_norm": 0.00048181738292063823,
      "learning_rate": 7.0065175916482095e-06,
      "loss": 8.1016,
      "step": 290
    },
    {
      "epoch": 0.91,
      "grad_norm": 0.0006907931371687696,
      "learning_rate": 5.163841998782837e-06,
      "loss": 8.1016,
      "step": 295
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.0005165153083703728,
      "learning_rate": 3.595540604290437e-06,
      "loss": 8.0984,
      "step": 300
    },
    {
      "epoch": 0.94,
      "grad_norm": 0.0004680021239548418,
      "learning_rate": 2.30615072228183e-06,
      "loss": 8.0969,
      "step": 305
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.00047907728646944946,
      "learning_rate": 1.2994027370611173e-06,
      "loss": 8.0969,
      "step": 310
    },
    {
      "epoch": 0.97,
      "grad_norm": 0.0004836942034123741,
      "learning_rate": 5.782093106048159e-07,
      "loss": 8.1016,
      "step": 315
    },
    {
      "epoch": 0.98,
      "grad_norm": 0.0005764125024224334,
      "learning_rate": 1.446569558255395e-07,
      "loss": 8.1032,
      "step": 320
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.0006775781098562137,
      "learning_rate": 0.0,
      "loss": 8.0938,
      "step": 325
    },
    {
      "epoch": 1.0,
      "eval_loss": NaN,
      "eval_runtime": 747.3678,
      "eval_samples_per_second": 1.547,
      "eval_steps_per_second": 0.387,
      "step": 325
    },
    {
      "epoch": 1.0,
      "step": 325,
      "total_flos": 5242797101678592.0,
      "train_loss": 8.265161878145658,
      "train_runtime": 5949.9241,
      "train_samples_per_second": 1.747,
      "train_steps_per_second": 0.055
    }
  ],
  "logging_steps": 5,
  "max_steps": 325,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 40,
  "total_flos": 5242797101678592.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}