{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 325,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 4.966057670734111,
      "learning_rate": 6.060606060606061e-06,
      "loss": 9.1692,
      "step": 1
    },
    {
      "epoch": 0.02,
      "grad_norm": 4.4086373609083305,
      "learning_rate": 3.0303030303030306e-05,
      "loss": 9.1498,
      "step": 5
    },
    {
      "epoch": 0.03,
      "grad_norm": 10.56943140466758,
      "learning_rate": 6.060606060606061e-05,
      "loss": 8.0701,
      "step": 10
    },
    {
      "epoch": 0.05,
      "grad_norm": 1.0222409562078616,
      "learning_rate": 9.090909090909092e-05,
      "loss": 5.6026,
      "step": 15
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.08102902140854293,
      "learning_rate": 0.00012121212121212122,
      "loss": 5.2166,
      "step": 20
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.024683296200408666,
      "learning_rate": 0.00015151515151515152,
      "loss": 5.2558,
      "step": 25
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.05101498910262888,
      "learning_rate": 0.00018181818181818183,
      "loss": 5.4174,
      "step": 30
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.009746005540016655,
      "learning_rate": 0.00019997685019798912,
      "loss": 5.5556,
      "step": 35
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.41034811182637754,
      "learning_rate": 0.0001997165380022878,
      "loss": 5.6887,
      "step": 40
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.021653839538441525,
      "learning_rate": 0.000199167731989929,
      "loss": 5.6532,
      "step": 45
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.007119031812625819,
      "learning_rate": 0.0001983320199330545,
      "loss": 5.6134,
      "step": 50
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.002175080726276986,
      "learning_rate": 0.00019721181966290613,
      "loss": 5.6375,
      "step": 55
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.0016236968230193256,
      "learning_rate": 0.00019581037207470382,
      "loss": 5.6539,
      "step": 60
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.0008684584953792839,
      "learning_rate": 0.00019413173175128473,
      "loss": 5.6594,
      "step": 65
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.0007165250669874539,
      "learning_rate": 0.00019218075523263104,
      "loss": 5.6852,
      "step": 70
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.0013355276692502973,
      "learning_rate": 0.00018996308696522433,
      "loss": 5.7791,
      "step": 75
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.015776280146193138,
      "learning_rate": 0.00018748514297187648,
      "loss": 5.3361,
      "step": 80
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.016515416923740724,
      "learning_rate": 0.00018475409228928312,
      "loss": 5.0863,
      "step": 85
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.011518636937235155,
      "learning_rate": 0.00018177783622700327,
      "loss": 5.003,
      "step": 90
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.008431883403879271,
      "learning_rate": 0.00017856498550787144,
      "loss": 5.0254,
      "step": 95
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.004208464354011368,
      "learning_rate": 0.00017512483535597867,
      "loss": 5.0571,
      "step": 100
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.0024543232426518812,
      "learning_rate": 0.00017146733860429612,
      "loss": 5.0774,
      "step": 105
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.011770237751697214,
      "learning_rate": 0.0001676030768997445,
      "loss": 5.0985,
      "step": 110
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.0012232543172927116,
      "learning_rate": 0.00016354323008901776,
      "loss": 5.0891,
      "step": 115
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.0011131940748631626,
      "learning_rate": 0.00015929954387373103,
      "loss": 5.1078,
      "step": 120
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.0007848533521529687,
      "learning_rate": 0.00015488429582847192,
      "loss": 5.1407,
      "step": 125
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.000942839738714448,
      "learning_rate": 0.00015031025988006936,
      "loss": 5.1133,
      "step": 130
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.0014645217129741282,
      "learning_rate": 0.00014559066935084588,
      "loss": 5.1195,
      "step": 135
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.0018167413381591129,
      "learning_rate": 0.00014073917867277557,
      "loss": 5.1258,
      "step": 140
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.0006483033060279427,
      "learning_rate": 0.0001357698238833126,
      "loss": 5.1156,
      "step": 145
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.000907685244310612,
      "learning_rate": 0.000130696982017182,
      "loss": 5.1156,
      "step": 150
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.0010163182837499274,
      "learning_rate": 0.0001255353295116187,
      "loss": 5.1156,
      "step": 155
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.0006269672357977566,
      "learning_rate": 0.00012029979974539234,
      "loss": 5.1063,
      "step": 160
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.0009630425191761279,
      "learning_rate": 0.00011500553983446527,
      "loss": 5.1227,
      "step": 165
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.0006541371379801539,
      "learning_rate": 0.00010966786680927874,
      "loss": 5.1235,
      "step": 170
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.0005339615585857347,
      "learning_rate": 0.00010430222330045304,
      "loss": 5.1164,
      "step": 175
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.0008697133023016856,
      "learning_rate": 9.892413286110886e-05,
      "loss": 5.1415,
      "step": 180
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.002710808242709564,
      "learning_rate": 9.354915505506839e-05,
      "loss": 5.1344,
      "step": 185
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.0036494198122248305,
      "learning_rate": 8.81928404408726e-05,
      "loss": 5.1399,
      "step": 190
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.0065159762311541875,
      "learning_rate": 8.287068558185225e-05,
      "loss": 5.1399,
      "step": 195
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.0037082380862026275,
      "learning_rate": 7.759808821241406e-05,
      "loss": 5.1265,
      "step": 200
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.0027408395630919608,
      "learning_rate": 7.239030269025311e-05,
      "loss": 5.1336,
      "step": 205
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.012854348458189542,
      "learning_rate": 6.726239586337408e-05,
      "loss": 5.1164,
      "step": 210
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.0015286542890117305,
      "learning_rate": 6.22292034796035e-05,
      "loss": 5.1094,
      "step": 215
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.000885838472289933,
      "learning_rate": 5.730528726470792e-05,
      "loss": 5.1133,
      "step": 220
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.001640241431113671,
      "learning_rate": 5.2504892793295e-05,
      "loss": 5.1078,
      "step": 225
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.0012069300346476944,
      "learning_rate": 4.7841908274384616e-05,
      "loss": 5.1125,
      "step": 230
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.0010847699053696477,
      "learning_rate": 4.332982437088825e-05,
      "loss": 5.1008,
      "step": 235
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.001040653093371117,
      "learning_rate": 3.898169516924398e-05,
      "loss": 5.1047,
      "step": 240
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.0018113529113868085,
      "learning_rate": 3.4810100412128747e-05,
      "loss": 5.1016,
      "step": 245
    },
    {
      "epoch": 0.77,
      "grad_norm": 0.0005998767854279542,
      "learning_rate": 3.0827109103512643e-05,
      "loss": 5.1055,
      "step": 250
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.0006057369143316162,
      "learning_rate": 2.7044244591351232e-05,
      "loss": 5.1188,
      "step": 255
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.0008733820965525893,
      "learning_rate": 2.3472451228937253e-05,
      "loss": 5.1024,
      "step": 260
    },
    {
      "epoch": 0.82,
      "grad_norm": 0.00044725420450702504,
      "learning_rate": 2.0122062711363532e-05,
      "loss": 5.1008,
      "step": 265
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.0005045243600866778,
      "learning_rate": 1.7002772178705716e-05,
      "loss": 5.1133,
      "step": 270
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.0003699276694513808,
      "learning_rate": 1.4123604172419713e-05,
      "loss": 5.1133,
      "step": 275
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.0005009069102438912,
      "learning_rate": 1.149288852608743e-05,
      "loss": 5.1141,
      "step": 280
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.00043804754145403135,
      "learning_rate": 9.118236266049707e-06,
      "loss": 5.1055,
      "step": 285
    },
    {
      "epoch": 0.89,
      "grad_norm": 0.0007514667615813387,
      "learning_rate": 7.0065175916482095e-06,
      "loss": 5.0992,
      "step": 290
    },
    {
      "epoch": 0.91,
      "grad_norm": 0.0007652506213514495,
      "learning_rate": 5.163841998782837e-06,
      "loss": 5.0938,
      "step": 295
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.00043682966677225103,
      "learning_rate": 3.595540604290437e-06,
      "loss": 5.1133,
      "step": 300
    },
    {
      "epoch": 0.94,
      "grad_norm": 0.0003666719447106826,
      "learning_rate": 2.30615072228183e-06,
      "loss": 5.1047,
      "step": 305
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.0002965553299134117,
      "learning_rate": 1.2994027370611173e-06,
      "loss": 5.1024,
      "step": 310
    },
    {
      "epoch": 0.97,
      "grad_norm": 0.0005906234207107822,
      "learning_rate": 5.782093106048159e-07,
      "loss": 5.1039,
      "step": 315
    },
    {
      "epoch": 0.98,
      "grad_norm": 0.00040431436972756634,
      "learning_rate": 1.446569558255395e-07,
      "loss": 5.1266,
      "step": 320
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.0011324307906468891,
      "learning_rate": 0.0,
      "loss": 5.1,
      "step": 325
    },
    {
      "epoch": 1.0,
      "eval_loss": NaN,
      "eval_runtime": 743.0674,
      "eval_samples_per_second": 1.556,
      "eval_steps_per_second": 0.389,
      "step": 325
    },
    {
      "epoch": 1.0,
      "step": 325,
      "total_flos": 5242797101678592.0,
      "train_loss": 5.311349804217999,
      "train_runtime": 5936.4673,
      "train_samples_per_second": 1.751,
      "train_steps_per_second": 0.055
    }
  ],
  "logging_steps": 5,
  "max_steps": 325,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 40,
  "total_flos": 5242797101678592.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}