{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.4982456140350875,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07017543859649122,
      "grad_norm": 5993.8466796875,
      "learning_rate": 1.8e-05,
      "loss": 0.1944,
      "step": 10
    },
    {
      "epoch": 0.14035087719298245,
      "grad_norm": 3562.712646484375,
      "learning_rate": 3.8e-05,
      "loss": 0.1443,
      "step": 20
    },
    {
      "epoch": 0.21052631578947367,
      "grad_norm": 3539.65087890625,
      "learning_rate": 5.8e-05,
      "loss": 0.1224,
      "step": 30
    },
    {
      "epoch": 0.2807017543859649,
      "grad_norm": 3544.288330078125,
      "learning_rate": 7.800000000000001e-05,
      "loss": 0.0958,
      "step": 40
    },
    {
      "epoch": 0.3508771929824561,
      "grad_norm": 5689.3173828125,
      "learning_rate": 9.8e-05,
      "loss": 0.1047,
      "step": 50
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 4658.375,
      "learning_rate": 9.827586206896552e-05,
      "loss": 0.0718,
      "step": 60
    },
    {
      "epoch": 0.49122807017543857,
      "grad_norm": 4036.04052734375,
      "learning_rate": 9.6360153256705e-05,
      "loss": 0.0741,
      "step": 70
    },
    {
      "epoch": 0.5614035087719298,
      "grad_norm": 4587.62841796875,
      "learning_rate": 9.444444444444444e-05,
      "loss": 0.068,
      "step": 80
    },
    {
      "epoch": 0.631578947368421,
      "grad_norm": 4352.63671875,
      "learning_rate": 9.252873563218392e-05,
      "loss": 0.0653,
      "step": 90
    },
    {
      "epoch": 0.7017543859649122,
      "grad_norm": 5600.20703125,
      "learning_rate": 9.061302681992338e-05,
      "loss": 0.0679,
      "step": 100
    },
    {
      "epoch": 0.7719298245614035,
      "grad_norm": 3705.227294921875,
      "learning_rate": 8.869731800766284e-05,
      "loss": 0.0623,
      "step": 110
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 4062.218505859375,
      "learning_rate": 8.67816091954023e-05,
      "loss": 0.0622,
      "step": 120
    },
    {
      "epoch": 0.9122807017543859,
      "grad_norm": 4199.43994140625,
      "learning_rate": 8.486590038314178e-05,
      "loss": 0.059,
      "step": 130
    },
    {
      "epoch": 0.9824561403508771,
      "grad_norm": 3701.624267578125,
      "learning_rate": 8.295019157088123e-05,
      "loss": 0.0588,
      "step": 140
    },
    {
      "epoch": 1.0491228070175438,
      "grad_norm": 4788.91552734375,
      "learning_rate": 8.103448275862069e-05,
      "loss": 0.0598,
      "step": 150
    },
    {
      "epoch": 1.119298245614035,
      "grad_norm": 3091.700439453125,
      "learning_rate": 7.911877394636016e-05,
      "loss": 0.056,
      "step": 160
    },
    {
      "epoch": 1.1894736842105262,
      "grad_norm": 3443.362548828125,
      "learning_rate": 7.720306513409961e-05,
      "loss": 0.0557,
      "step": 170
    },
    {
      "epoch": 1.2596491228070175,
      "grad_norm": 3259.4931640625,
      "learning_rate": 7.528735632183909e-05,
      "loss": 0.0495,
      "step": 180
    },
    {
      "epoch": 1.329824561403509,
      "grad_norm": 3825.1728515625,
      "learning_rate": 7.337164750957855e-05,
      "loss": 0.0535,
      "step": 190
    },
    {
      "epoch": 1.4,
      "grad_norm": 3203.66552734375,
      "learning_rate": 7.145593869731801e-05,
      "loss": 0.0489,
      "step": 200
    },
    {
      "epoch": 1.4701754385964914,
      "grad_norm": 4477.1640625,
      "learning_rate": 6.954022988505747e-05,
      "loss": 0.0508,
      "step": 210
    },
    {
      "epoch": 1.5403508771929824,
      "grad_norm": 4661.5703125,
      "learning_rate": 6.762452107279695e-05,
      "loss": 0.0436,
      "step": 220
    },
    {
      "epoch": 1.6105263157894738,
      "grad_norm": 3914.212890625,
      "learning_rate": 6.570881226053641e-05,
      "loss": 0.0496,
      "step": 230
    },
    {
      "epoch": 1.6807017543859648,
      "grad_norm": 5168.2548828125,
      "learning_rate": 6.379310344827587e-05,
      "loss": 0.0517,
      "step": 240
    },
    {
      "epoch": 1.7508771929824563,
      "grad_norm": 3639.001953125,
      "learning_rate": 6.187739463601533e-05,
      "loss": 0.0522,
      "step": 250
    },
    {
      "epoch": 1.8210526315789473,
      "grad_norm": 4367.4833984375,
      "learning_rate": 5.9961685823754786e-05,
      "loss": 0.0502,
      "step": 260
    },
    {
      "epoch": 1.8912280701754387,
      "grad_norm": 3486.47607421875,
      "learning_rate": 5.8045977011494254e-05,
      "loss": 0.0456,
      "step": 270
    },
    {
      "epoch": 1.9614035087719297,
      "grad_norm": 3831.04736328125,
      "learning_rate": 5.6130268199233716e-05,
      "loss": 0.0486,
      "step": 280
    },
    {
      "epoch": 2.0280701754385966,
      "grad_norm": 3694.040283203125,
      "learning_rate": 5.4214559386973184e-05,
      "loss": 0.0413,
      "step": 290
    },
    {
      "epoch": 2.0982456140350876,
      "grad_norm": 3733.336669921875,
      "learning_rate": 5.2298850574712646e-05,
      "loss": 0.046,
      "step": 300
    },
    {
      "epoch": 2.168421052631579,
      "grad_norm": 4054.663818359375,
      "learning_rate": 5.0383141762452114e-05,
      "loss": 0.0429,
      "step": 310
    },
    {
      "epoch": 2.23859649122807,
      "grad_norm": 3458.208251953125,
      "learning_rate": 4.846743295019157e-05,
      "loss": 0.042,
      "step": 320
    },
    {
      "epoch": 2.3087719298245615,
      "grad_norm": 4682.77294921875,
      "learning_rate": 4.655172413793104e-05,
      "loss": 0.0415,
      "step": 330
    },
    {
      "epoch": 2.3789473684210525,
      "grad_norm": 3480.484130859375,
      "learning_rate": 4.46360153256705e-05,
      "loss": 0.0398,
      "step": 340
    },
    {
      "epoch": 2.449122807017544,
      "grad_norm": 3643.171630859375,
      "learning_rate": 4.272030651340996e-05,
      "loss": 0.0403,
      "step": 350
    },
    {
      "epoch": 2.519298245614035,
      "grad_norm": 3384.7724609375,
      "learning_rate": 4.080459770114943e-05,
      "loss": 0.0403,
      "step": 360
    },
    {
      "epoch": 2.5894736842105264,
      "grad_norm": 5074.3017578125,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.0451,
      "step": 370
    },
    {
      "epoch": 2.659649122807018,
      "grad_norm": 4794.37646484375,
      "learning_rate": 3.697318007662835e-05,
      "loss": 0.0409,
      "step": 380
    },
    {
      "epoch": 2.729824561403509,
      "grad_norm": 3055.15869140625,
      "learning_rate": 3.505747126436782e-05,
      "loss": 0.0394,
      "step": 390
    },
    {
      "epoch": 2.8,
      "grad_norm": 4893.79052734375,
      "learning_rate": 3.314176245210728e-05,
      "loss": 0.0471,
      "step": 400
    },
    {
      "epoch": 2.8701754385964913,
      "grad_norm": 4101.93017578125,
      "learning_rate": 3.1226053639846744e-05,
      "loss": 0.0478,
      "step": 410
    },
    {
      "epoch": 2.9403508771929827,
      "grad_norm": 3935.029052734375,
      "learning_rate": 2.9310344827586206e-05,
      "loss": 0.0381,
      "step": 420
    },
    {
      "epoch": 3.007017543859649,
      "grad_norm": 4287.02880859375,
      "learning_rate": 2.739463601532567e-05,
      "loss": 0.0397,
      "step": 430
    },
    {
      "epoch": 3.07719298245614,
      "grad_norm": 3702.27490234375,
      "learning_rate": 2.5478927203065132e-05,
      "loss": 0.0355,
      "step": 440
    },
    {
      "epoch": 3.1473684210526316,
      "grad_norm": 4088.54052734375,
      "learning_rate": 2.3563218390804597e-05,
      "loss": 0.0356,
      "step": 450
    },
    {
      "epoch": 3.2175438596491226,
      "grad_norm": 5307.07470703125,
      "learning_rate": 2.1647509578544062e-05,
      "loss": 0.0426,
      "step": 460
    },
    {
      "epoch": 3.287719298245614,
      "grad_norm": 4035.5810546875,
      "learning_rate": 1.9731800766283527e-05,
      "loss": 0.0378,
      "step": 470
    },
    {
      "epoch": 3.3578947368421055,
      "grad_norm": 4500.57470703125,
      "learning_rate": 1.781609195402299e-05,
      "loss": 0.0424,
      "step": 480
    },
    {
      "epoch": 3.4280701754385965,
      "grad_norm": 4739.67431640625,
      "learning_rate": 1.5900383141762454e-05,
      "loss": 0.041,
      "step": 490
    },
    {
      "epoch": 3.4982456140350875,
      "grad_norm": 3640.994384765625,
      "learning_rate": 1.3984674329501916e-05,
      "loss": 0.0381,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 572,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.4406372122423296e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}