{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.7540983606557377,
  "eval_steps": 500,
  "global_step": 21,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13114754098360656,
      "grad_norm": 38.48949567054442,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 2.4278,
      "step": 1
    },
    {
      "epoch": 0.26229508196721313,
      "grad_norm": 39.356777754899475,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 2.4386,
      "step": 2
    },
    {
      "epoch": 0.39344262295081966,
      "grad_norm": 33.30743701183079,
      "learning_rate": 5e-05,
      "loss": 1.9706,
      "step": 3
    },
    {
      "epoch": 0.5245901639344263,
      "grad_norm": 14.11009284862285,
      "learning_rate": 4.962019382530521e-05,
      "loss": 2.046,
      "step": 4
    },
    {
      "epoch": 0.6557377049180327,
      "grad_norm": 15.728498285095311,
      "learning_rate": 4.849231551964771e-05,
      "loss": 1.8774,
      "step": 5
    },
    {
      "epoch": 0.7868852459016393,
      "grad_norm": 7.811945380246204,
      "learning_rate": 4.665063509461097e-05,
      "loss": 1.555,
      "step": 6
    },
    {
      "epoch": 0.9180327868852459,
      "grad_norm": 4.212013153204252,
      "learning_rate": 4.415111107797445e-05,
      "loss": 1.3675,
      "step": 7
    },
    {
      "epoch": 0.9180327868852459,
      "eval_loss": 1.2844842672348022,
      "eval_runtime": 9.2887,
      "eval_samples_per_second": 20.67,
      "eval_steps_per_second": 2.584,
      "step": 7
    },
    {
      "epoch": 1.0491803278688525,
      "grad_norm": 4.5268328646450735,
      "learning_rate": 4.1069690242163484e-05,
      "loss": 1.2474,
      "step": 8
    },
    {
      "epoch": 1.180327868852459,
      "grad_norm": 3.2715775686151596,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 1.1467,
      "step": 9
    },
    {
      "epoch": 1.3114754098360657,
      "grad_norm": 2.0643659092546915,
      "learning_rate": 3.355050358314172e-05,
      "loss": 1.0749,
      "step": 10
    },
    {
      "epoch": 1.4426229508196722,
      "grad_norm": 2.304739353698177,
      "learning_rate": 2.9341204441673266e-05,
      "loss": 1.0221,
      "step": 11
    },
    {
      "epoch": 1.5737704918032787,
      "grad_norm": 2.127617323697091,
      "learning_rate": 2.5e-05,
      "loss": 1.0107,
      "step": 12
    },
    {
      "epoch": 1.7049180327868854,
      "grad_norm": 2.0707864059238807,
      "learning_rate": 2.0658795558326743e-05,
      "loss": 0.9786,
      "step": 13
    },
    {
      "epoch": 1.8360655737704918,
      "grad_norm": 1.3495555593879531,
      "learning_rate": 1.6449496416858284e-05,
      "loss": 0.956,
      "step": 14
    },
    {
      "epoch": 1.9672131147540983,
      "grad_norm": 1.420785537317645,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 0.9289,
      "step": 15
    },
    {
      "epoch": 1.9672131147540983,
      "eval_loss": 1.018620491027832,
      "eval_runtime": 9.0608,
      "eval_samples_per_second": 21.19,
      "eval_steps_per_second": 2.649,
      "step": 15
    },
    {
      "epoch": 2.098360655737705,
      "grad_norm": 1.2601137070878647,
      "learning_rate": 8.930309757836517e-06,
      "loss": 0.8122,
      "step": 16
    },
    {
      "epoch": 2.2295081967213113,
      "grad_norm": 1.3786806913074527,
      "learning_rate": 5.848888922025553e-06,
      "loss": 0.7862,
      "step": 17
    },
    {
      "epoch": 2.360655737704918,
      "grad_norm": 1.1372265617537292,
      "learning_rate": 3.3493649053890326e-06,
      "loss": 0.7698,
      "step": 18
    },
    {
      "epoch": 2.4918032786885247,
      "grad_norm": 1.085080138353371,
      "learning_rate": 1.5076844803522922e-06,
      "loss": 0.7537,
      "step": 19
    },
    {
      "epoch": 2.6229508196721314,
      "grad_norm": 1.067963510724275,
      "learning_rate": 3.7980617469479953e-07,
      "loss": 0.7358,
      "step": 20
    },
    {
      "epoch": 2.7540983606557377,
      "grad_norm": 1.0345534581342344,
      "learning_rate": 0.0,
      "loss": 0.7336,
      "step": 21
    },
    {
      "epoch": 2.7540983606557377,
      "eval_loss": 1.0155988931655884,
      "eval_runtime": 8.9999,
      "eval_samples_per_second": 21.334,
      "eval_steps_per_second": 2.667,
      "step": 21
    },
    {
      "epoch": 2.7540983606557377,
      "step": 21,
      "total_flos": 3971544514560.0,
      "train_loss": 1.26855130422683,
      "train_runtime": 385.6521,
      "train_samples_per_second": 3.757,
      "train_steps_per_second": 0.054
    }
  ],
  "logging_steps": 1,
  "max_steps": 21,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3971544514560.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}