{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.549520766773163,
  "eval_steps": 500,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06389776357827476,
      "grad_norm": 20.377737713693822,
      "learning_rate": 1.8750000000000003e-06,
      "loss": 2.5741,
      "step": 10
    },
    {
      "epoch": 0.12779552715654952,
      "grad_norm": 4.615055699717414,
      "learning_rate": 3.958333333333333e-06,
      "loss": 1.8858,
      "step": 20
    },
    {
      "epoch": 0.19169329073482427,
      "grad_norm": 2.7878960752665805,
      "learning_rate": 6.041666666666667e-06,
      "loss": 1.3172,
      "step": 30
    },
    {
      "epoch": 0.25559105431309903,
      "grad_norm": 2.1443618886066678,
      "learning_rate": 8.125000000000001e-06,
      "loss": 1.0762,
      "step": 40
    },
    {
      "epoch": 0.3194888178913738,
      "grad_norm": 2.083564361550629,
      "learning_rate": 9.999862102299874e-06,
      "loss": 0.9691,
      "step": 50
    },
    {
      "epoch": 0.38338658146964855,
      "grad_norm": 2.027770037241651,
      "learning_rate": 9.983323579940351e-06,
      "loss": 0.9239,
      "step": 60
    },
    {
      "epoch": 0.4472843450479233,
      "grad_norm": 1.8790492807169699,
      "learning_rate": 9.939310009499348e-06,
      "loss": 0.8883,
      "step": 70
    },
    {
      "epoch": 0.5111821086261981,
      "grad_norm": 1.7919483636147056,
      "learning_rate": 9.868064055324204e-06,
      "loss": 0.8785,
      "step": 80
    },
    {
      "epoch": 0.5750798722044729,
      "grad_norm": 1.97998264662781,
      "learning_rate": 9.76997852474223e-06,
      "loss": 0.8606,
      "step": 90
    },
    {
      "epoch": 0.6389776357827476,
      "grad_norm": 1.7621305607359916,
      "learning_rate": 9.645594202357438e-06,
      "loss": 0.8499,
      "step": 100
    },
    {
      "epoch": 0.7028753993610224,
      "grad_norm": 1.7524493187730084,
      "learning_rate": 9.495596868489588e-06,
      "loss": 0.8322,
      "step": 110
    },
    {
      "epoch": 0.7667731629392971,
      "grad_norm": 1.8949002802030435,
      "learning_rate": 9.320813518194084e-06,
      "loss": 0.8225,
      "step": 120
    },
    {
      "epoch": 0.8306709265175719,
      "grad_norm": 1.896682860936547,
      "learning_rate": 9.122207801708802e-06,
      "loss": 0.8242,
      "step": 130
    },
    {
      "epoch": 0.8945686900958466,
      "grad_norm": 1.9529011844472832,
      "learning_rate": 8.900874711466436e-06,
      "loss": 0.8026,
      "step": 140
    },
    {
      "epoch": 0.9584664536741214,
      "grad_norm": 1.8816098587424452,
      "learning_rate": 8.658034544965003e-06,
      "loss": 0.8046,
      "step": 150
    },
    {
      "epoch": 1.0191693290734825,
      "grad_norm": 1.7513144878089228,
      "learning_rate": 8.395026176781627e-06,
      "loss": 0.7811,
      "step": 160
    },
    {
      "epoch": 1.0830670926517572,
      "grad_norm": 1.6927871553024794,
      "learning_rate": 8.113299676823614e-06,
      "loss": 0.7099,
      "step": 170
    },
    {
      "epoch": 1.1469648562300319,
      "grad_norm": 1.8501286355982487,
      "learning_rate": 7.814408315515419e-06,
      "loss": 0.716,
      "step": 180
    },
    {
      "epoch": 1.2108626198083068,
      "grad_norm": 1.898746762667725,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.6966,
      "step": 190
    },
    {
      "epoch": 1.2747603833865815,
      "grad_norm": 2.0376095953208186,
      "learning_rate": 7.1718081885702905e-06,
      "loss": 0.6988,
      "step": 200
    },
    {
      "epoch": 1.3386581469648562,
      "grad_norm": 2.0679265754168097,
      "learning_rate": 6.831642333423068e-06,
      "loss": 0.6995,
      "step": 210
    },
    {
      "epoch": 1.4025559105431311,
      "grad_norm": 1.7425470695615182,
      "learning_rate": 6.481377904428171e-06,
      "loss": 0.6849,
      "step": 220
    },
    {
      "epoch": 1.4664536741214058,
      "grad_norm": 1.7480874168585598,
      "learning_rate": 6.122946048915991e-06,
      "loss": 0.6797,
      "step": 230
    },
    {
      "epoch": 1.5303514376996805,
      "grad_norm": 1.932258961098815,
      "learning_rate": 5.75832294449293e-06,
      "loss": 0.6826,
      "step": 240
    },
    {
      "epoch": 1.5942492012779552,
      "grad_norm": 1.795120951075422,
      "learning_rate": 5.389518903587016e-06,
      "loss": 0.6794,
      "step": 250
    },
    {
      "epoch": 1.65814696485623,
      "grad_norm": 2.102611176384133,
      "learning_rate": 5.0185672897946515e-06,
      "loss": 0.6609,
      "step": 260
    },
    {
      "epoch": 1.7220447284345048,
      "grad_norm": 1.866705749251324,
      "learning_rate": 4.647513307137076e-06,
      "loss": 0.6741,
      "step": 270
    },
    {
      "epoch": 1.7859424920127795,
      "grad_norm": 1.7566750001867661,
      "learning_rate": 4.278402724035868e-06,
      "loss": 0.6619,
      "step": 280
    },
    {
      "epoch": 1.8498402555910545,
      "grad_norm": 1.814186513384251,
      "learning_rate": 3.913270594176665e-06,
      "loss": 0.6529,
      "step": 290
    },
    {
      "epoch": 1.9137380191693292,
      "grad_norm": 1.8183647565801306,
      "learning_rate": 3.5541300364475067e-06,
      "loss": 0.6406,
      "step": 300
    },
    {
      "epoch": 1.9776357827476039,
      "grad_norm": 1.9111185282349554,
      "learning_rate": 3.202961135812437e-06,
      "loss": 0.6302,
      "step": 310
    },
    {
      "epoch": 2.038338658146965,
      "grad_norm": 1.940940094549139,
      "learning_rate": 2.861700026314308e-06,
      "loss": 0.5852,
      "step": 320
    },
    {
      "epoch": 2.1022364217252396,
      "grad_norm": 1.812424693455716,
      "learning_rate": 2.5322282163965096e-06,
      "loss": 0.554,
      "step": 330
    },
    {
      "epoch": 2.1661341853035143,
      "grad_norm": 1.9758944605649846,
      "learning_rate": 2.216362215397393e-06,
      "loss": 0.5457,
      "step": 340
    },
    {
      "epoch": 2.230031948881789,
      "grad_norm": 1.846108330503614,
      "learning_rate": 1.91584351841065e-06,
      "loss": 0.537,
      "step": 350
    },
    {
      "epoch": 2.2939297124600637,
      "grad_norm": 1.9713773985567047,
      "learning_rate": 1.6323290047291196e-06,
      "loss": 0.5249,
      "step": 360
    },
    {
      "epoch": 2.357827476038339,
      "grad_norm": 1.96920367350381,
      "learning_rate": 1.367381802809185e-06,
      "loss": 0.521,
      "step": 370
    },
    {
      "epoch": 2.4217252396166136,
      "grad_norm": 1.8545979654793228,
      "learning_rate": 1.1224626721209141e-06,
      "loss": 0.5263,
      "step": 380
    },
    {
      "epoch": 2.4856230031948883,
      "grad_norm": 1.8666939617951708,
      "learning_rate": 8.989219493991791e-07,
      "loss": 0.5183,
      "step": 390
    },
    {
      "epoch": 2.549520766773163,
      "grad_norm": 1.976527350667108,
      "learning_rate": 6.979921036993042e-07,
      "loss": 0.5214,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 471,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 31500009209856.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}