{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 312,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06433453960595094,
      "grad_norm": 3.45802150331781,
      "learning_rate": 9e-06,
      "loss": 1.2375,
      "step": 10
    },
    {
      "epoch": 0.12866907921190188,
      "grad_norm": 1.1539024437598726,
      "learning_rate": 9.701986754966888e-06,
      "loss": 0.9458,
      "step": 20
    },
    {
      "epoch": 0.19300361881785283,
      "grad_norm": 0.9061861869896987,
      "learning_rate": 9.370860927152319e-06,
      "loss": 0.8553,
      "step": 30
    },
    {
      "epoch": 0.25733815842380375,
      "grad_norm": 0.9981148139996454,
      "learning_rate": 9.039735099337749e-06,
      "loss": 0.8223,
      "step": 40
    },
    {
      "epoch": 0.32167269802975473,
      "grad_norm": 0.8365225486736688,
      "learning_rate": 8.70860927152318e-06,
      "loss": 0.7753,
      "step": 50
    },
    {
      "epoch": 0.38600723763570566,
      "grad_norm": 0.8456361592054138,
      "learning_rate": 8.37748344370861e-06,
      "loss": 0.7945,
      "step": 60
    },
    {
      "epoch": 0.45034177724165664,
      "grad_norm": 0.8510899654200162,
      "learning_rate": 8.04635761589404e-06,
      "loss": 0.7906,
      "step": 70
    },
    {
      "epoch": 0.5146763168476075,
      "grad_norm": 0.8094078169285094,
      "learning_rate": 7.715231788079471e-06,
      "loss": 0.7744,
      "step": 80
    },
    {
      "epoch": 0.5790108564535585,
      "grad_norm": 0.7429293470064048,
      "learning_rate": 7.384105960264901e-06,
      "loss": 0.7717,
      "step": 90
    },
    {
      "epoch": 0.6433453960595095,
      "grad_norm": 0.7512159253738925,
      "learning_rate": 7.052980132450332e-06,
      "loss": 0.7744,
      "step": 100
    },
    {
      "epoch": 0.7076799356654604,
      "grad_norm": 0.826044235136776,
      "learning_rate": 6.721854304635762e-06,
      "loss": 0.7491,
      "step": 110
    },
    {
      "epoch": 0.7720144752714113,
      "grad_norm": 0.8503335376772373,
      "learning_rate": 6.390728476821193e-06,
      "loss": 0.7593,
      "step": 120
    },
    {
      "epoch": 0.8363490148773622,
      "grad_norm": 0.7306086642250036,
      "learning_rate": 6.059602649006623e-06,
      "loss": 0.7555,
      "step": 130
    },
    {
      "epoch": 0.9006835544833133,
      "grad_norm": 0.745854567446407,
      "learning_rate": 5.728476821192054e-06,
      "loss": 0.7434,
      "step": 140
    },
    {
      "epoch": 0.9650180940892642,
      "grad_norm": 0.7574425315504884,
      "learning_rate": 5.397350993377483e-06,
      "loss": 0.7764,
      "step": 150
    },
    {
      "epoch": 1.0257338158423803,
      "grad_norm": 0.7250372088806204,
      "learning_rate": 5.066225165562915e-06,
      "loss": 0.709,
      "step": 160
    },
    {
      "epoch": 1.0900683554483312,
      "grad_norm": 0.75139008443435,
      "learning_rate": 4.7350993377483445e-06,
      "loss": 0.6574,
      "step": 170
    },
    {
      "epoch": 1.1544028950542822,
      "grad_norm": 0.7181949393746255,
      "learning_rate": 4.403973509933775e-06,
      "loss": 0.6573,
      "step": 180
    },
    {
      "epoch": 1.218737434660233,
      "grad_norm": 0.7970783454719403,
      "learning_rate": 4.072847682119206e-06,
      "loss": 0.6617,
      "step": 190
    },
    {
      "epoch": 1.2830719742661842,
      "grad_norm": 0.741621225992888,
      "learning_rate": 3.7417218543046362e-06,
      "loss": 0.6625,
      "step": 200
    },
    {
      "epoch": 1.3474065138721352,
      "grad_norm": 0.7244825073568412,
      "learning_rate": 3.4105960264900668e-06,
      "loss": 0.6516,
      "step": 210
    },
    {
      "epoch": 1.411741053478086,
      "grad_norm": 0.6328326617408025,
      "learning_rate": 3.079470198675497e-06,
      "loss": 0.6478,
      "step": 220
    },
    {
      "epoch": 1.476075593084037,
      "grad_norm": 0.7325603487325139,
      "learning_rate": 2.7483443708609275e-06,
      "loss": 0.6512,
      "step": 230
    },
    {
      "epoch": 1.540410132689988,
      "grad_norm": 0.6447452871518153,
      "learning_rate": 2.417218543046358e-06,
      "loss": 0.6454,
      "step": 240
    },
    {
      "epoch": 1.6047446722959389,
      "grad_norm": 0.7076398532029401,
      "learning_rate": 2.086092715231788e-06,
      "loss": 0.6539,
      "step": 250
    },
    {
      "epoch": 1.6690792119018898,
      "grad_norm": 0.6775962757094781,
      "learning_rate": 1.7549668874172188e-06,
      "loss": 0.6499,
      "step": 260
    },
    {
      "epoch": 1.7334137515078407,
      "grad_norm": 0.6345994881996349,
      "learning_rate": 1.4238410596026491e-06,
      "loss": 0.6456,
      "step": 270
    },
    {
      "epoch": 1.7977482911137916,
      "grad_norm": 0.6759310052346794,
      "learning_rate": 1.0927152317880797e-06,
      "loss": 0.6499,
      "step": 280
    },
    {
      "epoch": 1.8620828307197428,
      "grad_norm": 0.686752021394961,
      "learning_rate": 7.6158940397351e-07,
      "loss": 0.6526,
      "step": 290
    },
    {
      "epoch": 1.9264173703256935,
      "grad_norm": 0.6535588837970772,
      "learning_rate": 4.304635761589404e-07,
      "loss": 0.6394,
      "step": 300
    },
    {
      "epoch": 1.9907519099316446,
      "grad_norm": 0.7094514579767396,
      "learning_rate": 9.933774834437088e-08,
      "loss": 0.643,
      "step": 310
    },
    {
      "epoch": 2.0,
      "step": 312,
      "total_flos": 119307564613632.0,
      "train_loss": 0.7332022241674937,
      "train_runtime": 6450.9735,
      "train_samples_per_second": 6.168,
      "train_steps_per_second": 0.048
    }
  ],
  "logging_steps": 10,
  "max_steps": 312,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 119307564613632.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}