{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 450,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022222222222222223,
      "grad_norm": 15.23481298955369,
      "learning_rate": 2.222222222222222e-06,
      "loss": 1.405,
      "step": 10
    },
    {
      "epoch": 0.044444444444444446,
      "grad_norm": 14.775087887271988,
      "learning_rate": 4.444444444444444e-06,
      "loss": 1.11,
      "step": 20
    },
    {
      "epoch": 0.06666666666666667,
      "grad_norm": 8.844204398433797,
      "learning_rate": 6.666666666666667e-06,
      "loss": 1.0618,
      "step": 30
    },
    {
      "epoch": 0.08888888888888889,
      "grad_norm": 11.430917605582387,
      "learning_rate": 8.888888888888888e-06,
      "loss": 1.1446,
      "step": 40
    },
    {
      "epoch": 0.1111111111111111,
      "grad_norm": 11.800583302678678,
      "learning_rate": 9.996239762521152e-06,
      "loss": 1.1175,
      "step": 50
    },
    {
      "epoch": 0.13333333333333333,
      "grad_norm": 8.386548386677672,
      "learning_rate": 9.966191788709716e-06,
      "loss": 1.2044,
      "step": 60
    },
    {
      "epoch": 0.15555555555555556,
      "grad_norm": 10.002087983024431,
      "learning_rate": 9.906276553136924e-06,
      "loss": 1.1394,
      "step": 70
    },
    {
      "epoch": 0.17777777777777778,
      "grad_norm": 9.19270068765703,
      "learning_rate": 9.816854393079402e-06,
      "loss": 1.1235,
      "step": 80
    },
    {
      "epoch": 0.2,
      "grad_norm": 9.367507898324098,
      "learning_rate": 9.698463103929542e-06,
      "loss": 1.1788,
      "step": 90
    },
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 7.398439137570879,
      "learning_rate": 9.551814704830734e-06,
      "loss": 1.107,
      "step": 100
    },
    {
      "epoch": 0.2222222222222222,
      "eval_loss": 1.100467562675476,
      "eval_runtime": 61.6325,
      "eval_samples_per_second": 3.245,
      "eval_steps_per_second": 1.623,
      "step": 100
    },
    {
      "epoch": 0.24444444444444444,
      "grad_norm": 6.932832739844831,
      "learning_rate": 9.377791156510456e-06,
      "loss": 1.1634,
      "step": 110
    },
    {
      "epoch": 0.26666666666666666,
      "grad_norm": 7.9715773837823924,
      "learning_rate": 9.177439057064684e-06,
      "loss": 1.0996,
      "step": 120
    },
    {
      "epoch": 0.28888888888888886,
      "grad_norm": 7.455497454350953,
      "learning_rate": 8.951963347593797e-06,
      "loss": 1.12,
      "step": 130
    },
    {
      "epoch": 0.3111111111111111,
      "grad_norm": 8.943084575523226,
      "learning_rate": 8.702720065545024e-06,
      "loss": 1.0982,
      "step": 140
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 8.559521410728289,
      "learning_rate": 8.43120818934367e-06,
      "loss": 1.1377,
      "step": 150
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 7.163180714492423,
      "learning_rate": 8.139060623360494e-06,
      "loss": 1.1313,
      "step": 160
    },
    {
      "epoch": 0.37777777777777777,
      "grad_norm": 6.526676853646879,
      "learning_rate": 7.828034377432694e-06,
      "loss": 1.127,
      "step": 170
    },
    {
      "epoch": 0.4,
      "grad_norm": 7.384056811473898,
      "learning_rate": 7.500000000000001e-06,
      "loss": 1.0313,
      "step": 180
    },
    {
      "epoch": 0.4222222222222222,
      "grad_norm": 6.741193547166085,
      "learning_rate": 7.156930328406268e-06,
      "loss": 1.0519,
      "step": 190
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 6.831151589131766,
      "learning_rate": 6.800888624023552e-06,
      "loss": 1.0303,
      "step": 200
    },
    {
      "epoch": 0.4444444444444444,
      "eval_loss": 1.0128766298294067,
      "eval_runtime": 61.6213,
      "eval_samples_per_second": 3.246,
      "eval_steps_per_second": 1.623,
      "step": 200
    },
    {
      "epoch": 0.4666666666666667,
      "grad_norm": 7.994739699626817,
      "learning_rate": 6.434016163555452e-06,
      "loss": 0.9573,
      "step": 210
    },
    {
      "epoch": 0.4888888888888889,
      "grad_norm": 5.9667654988837535,
      "learning_rate": 6.058519361147055e-06,
      "loss": 1.0128,
      "step": 220
    },
    {
      "epoch": 0.5111111111111111,
      "grad_norm": 7.202591191442998,
      "learning_rate": 5.6766564987506564e-06,
      "loss": 1.0354,
      "step": 230
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 6.773568220076416,
      "learning_rate": 5.290724144552379e-06,
      "loss": 1.0924,
      "step": 240
    },
    {
      "epoch": 0.5555555555555556,
      "grad_norm": 5.998554904237593,
      "learning_rate": 4.903043341140879e-06,
      "loss": 1.063,
      "step": 250
    },
    {
      "epoch": 0.5777777777777777,
      "grad_norm": 6.980524964053779,
      "learning_rate": 4.515945646484105e-06,
      "loss": 1.0263,
      "step": 260
    },
    {
      "epoch": 0.6,
      "grad_norm": 5.885312146673563,
      "learning_rate": 4.131759111665349e-06,
      "loss": 0.9798,
      "step": 270
    },
    {
      "epoch": 0.6222222222222222,
      "grad_norm": 7.5435937455645155,
      "learning_rate": 3.752794279710094e-06,
      "loss": 0.9744,
      "step": 280
    },
    {
      "epoch": 0.6444444444444445,
      "grad_norm": 7.713036460895744,
      "learning_rate": 3.3813302897083955e-06,
      "loss": 0.9874,
      "step": 290
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 7.323561184985059,
      "learning_rate": 3.019601169804216e-06,
      "loss": 0.9439,
      "step": 300
    },
    {
      "epoch": 0.6666666666666666,
      "eval_loss": 0.9523131847381592,
      "eval_runtime": 61.6063,
      "eval_samples_per_second": 3.246,
      "eval_steps_per_second": 1.623,
      "step": 300
    },
    {
      "epoch": 0.6888888888888889,
      "grad_norm": 5.889165095526867,
      "learning_rate": 2.6697824014873076e-06,
      "loss": 0.9909,
      "step": 310
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 6.453354559584317,
      "learning_rate": 2.333977835991545e-06,
      "loss": 0.966,
      "step": 320
    },
    {
      "epoch": 0.7333333333333333,
      "grad_norm": 5.327231148266013,
      "learning_rate": 2.0142070414860704e-06,
      "loss": 0.8804,
      "step": 330
    },
    {
      "epoch": 0.7555555555555555,
      "grad_norm": 7.333300364450659,
      "learning_rate": 1.7123931571546826e-06,
      "loss": 0.8833,
      "step": 340
    },
    {
      "epoch": 0.7777777777777778,
      "grad_norm": 8.600679211226076,
      "learning_rate": 1.4303513272105057e-06,
      "loss": 0.9143,
      "step": 350
    },
    {
      "epoch": 0.8,
      "grad_norm": 5.446196813851984,
      "learning_rate": 1.1697777844051105e-06,
      "loss": 0.9209,
      "step": 360
    },
    {
      "epoch": 0.8222222222222222,
      "grad_norm": 8.420616876259867,
      "learning_rate": 9.322396486851626e-07,
      "loss": 0.8687,
      "step": 370
    },
    {
      "epoch": 0.8444444444444444,
      "grad_norm": 6.567024558382901,
      "learning_rate": 7.191655023486682e-07,
      "loss": 0.8947,
      "step": 380
    },
    {
      "epoch": 0.8666666666666667,
      "grad_norm": 7.482818561159306,
      "learning_rate": 5.318367983829393e-07,
      "loss": 0.9376,
      "step": 390
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 5.7920770579615,
      "learning_rate": 3.7138015365554834e-07,
      "loss": 0.8362,
      "step": 400
    },
    {
      "epoch": 0.8888888888888888,
      "eval_loss": 0.9151431322097778,
      "eval_runtime": 61.6034,
      "eval_samples_per_second": 3.247,
      "eval_steps_per_second": 1.623,
      "step": 400
    },
    {
      "epoch": 0.9111111111111111,
      "grad_norm": 7.05898138836526,
      "learning_rate": 2.3876057330792344e-07,
      "loss": 0.8494,
      "step": 410
    },
    {
      "epoch": 0.9333333333333333,
      "grad_norm": 7.202485078434944,
      "learning_rate": 1.3477564710088097e-07,
      "loss": 0.9062,
      "step": 420
    },
    {
      "epoch": 0.9555555555555556,
      "grad_norm": 6.041514675233874,
      "learning_rate": 6.005075261595495e-08,
      "loss": 0.9755,
      "step": 430
    },
    {
      "epoch": 0.9777777777777777,
      "grad_norm": 6.891604479006659,
      "learning_rate": 1.5035294161039882e-08,
      "loss": 0.9235,
      "step": 440
    },
    {
      "epoch": 1.0,
      "grad_norm": 7.456362795939337,
      "learning_rate": 0.0,
      "loss": 0.8962,
      "step": 450
    },
    {
      "epoch": 1.0,
      "step": 450,
      "total_flos": 1092738416640.0,
      "train_loss": 1.0288691223992241,
      "train_runtime": 1866.138,
      "train_samples_per_second": 0.965,
      "train_steps_per_second": 0.241
    }
  ],
  "logging_steps": 10,
  "max_steps": 450,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1092738416640.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}