{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.493765586034913,
  "eval_steps": 500,
  "global_step": 12000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.10390689941812137,
      "grad_norm": 1.8855979442596436,
      "learning_rate": 1.9310058187863676e-05,
      "loss": 0.8022,
      "step": 500
    },
    {
      "epoch": 0.20781379883624274,
      "grad_norm": 0.8622527122497559,
      "learning_rate": 1.86173455250762e-05,
      "loss": 0.5316,
      "step": 1000
    },
    {
      "epoch": 0.3117206982543641,
      "grad_norm": 1.404678225517273,
      "learning_rate": 1.7924632862288724e-05,
      "loss": 0.4832,
      "step": 1500
    },
    {
      "epoch": 0.41562759767248547,
      "grad_norm": 1.3559819459915161,
      "learning_rate": 1.7233305624826823e-05,
      "loss": 0.4518,
      "step": 2000
    },
    {
      "epoch": 0.5195344970906068,
      "grad_norm": 0.8163271546363831,
      "learning_rate": 1.6540592962039347e-05,
      "loss": 0.4389,
      "step": 2500
    },
    {
      "epoch": 0.6234413965087282,
      "grad_norm": 0.8109046816825867,
      "learning_rate": 1.584788029925187e-05,
      "loss": 0.4193,
      "step": 3000
    },
    {
      "epoch": 0.7273482959268496,
      "grad_norm": 1.0217444896697998,
      "learning_rate": 1.5155167636464397e-05,
      "loss": 0.4215,
      "step": 3500
    },
    {
      "epoch": 0.8312551953449709,
      "grad_norm": 1.6476292610168457,
      "learning_rate": 1.446245497367692e-05,
      "loss": 0.4062,
      "step": 4000
    },
    {
      "epoch": 0.9351620947630923,
      "grad_norm": 1.4694277048110962,
      "learning_rate": 1.3769742310889445e-05,
      "loss": 0.4029,
      "step": 4500
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.3212089240550995,
      "eval_runtime": 31.4577,
      "eval_samples_per_second": 15.704,
      "eval_steps_per_second": 7.852,
      "step": 4812
    },
    {
      "epoch": 1.0390689941812137,
      "grad_norm": 0.6561925411224365,
      "learning_rate": 1.3077029648101969e-05,
      "loss": 0.4059,
      "step": 5000
    },
    {
      "epoch": 1.142975893599335,
      "grad_norm": 0.6741281747817993,
      "learning_rate": 1.2384316985314493e-05,
      "loss": 0.3594,
      "step": 5500
    },
    {
      "epoch": 1.2468827930174564,
      "grad_norm": 0.48619207739830017,
      "learning_rate": 1.1691604322527017e-05,
      "loss": 0.3736,
      "step": 6000
    },
    {
      "epoch": 1.3507896924355778,
      "grad_norm": 1.1009119749069214,
      "learning_rate": 1.099889165973954e-05,
      "loss": 0.3624,
      "step": 6500
    },
    {
      "epoch": 1.4546965918536992,
      "grad_norm": 0.3497615456581116,
      "learning_rate": 1.0306178996952066e-05,
      "loss": 0.3516,
      "step": 7000
    },
    {
      "epoch": 1.5586034912718203,
      "grad_norm": 1.4209001064300537,
      "learning_rate": 9.61346633416459e-06,
      "loss": 0.3565,
      "step": 7500
    },
    {
      "epoch": 1.6625103906899419,
      "grad_norm": 0.8116744160652161,
      "learning_rate": 8.920753671377114e-06,
      "loss": 0.3635,
      "step": 8000
    },
    {
      "epoch": 1.766417290108063,
      "grad_norm": 0.8015578985214233,
      "learning_rate": 8.228041008589638e-06,
      "loss": 0.3549,
      "step": 8500
    },
    {
      "epoch": 1.8703241895261846,
      "grad_norm": 0.7980790734291077,
      "learning_rate": 7.536713771127737e-06,
      "loss": 0.3495,
      "step": 9000
    },
    {
      "epoch": 1.9742310889443058,
      "grad_norm": 1.4501579999923706,
      "learning_rate": 6.845386533665836e-06,
      "loss": 0.3385,
      "step": 9500
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.29613471031188965,
      "eval_runtime": 31.4504,
      "eval_samples_per_second": 15.707,
      "eval_steps_per_second": 7.854,
      "step": 9624
    },
    {
      "epoch": 2.0781379883624274,
      "grad_norm": 0.6130263209342957,
      "learning_rate": 6.15267387087836e-06,
      "loss": 0.3293,
      "step": 10000
    },
    {
      "epoch": 2.1820448877805485,
      "grad_norm": 1.2724053859710693,
      "learning_rate": 5.459961208090885e-06,
      "loss": 0.3369,
      "step": 10500
    },
    {
      "epoch": 2.28595178719867,
      "grad_norm": 0.7700533270835876,
      "learning_rate": 4.767248545303408e-06,
      "loss": 0.3387,
      "step": 11000
    },
    {
      "epoch": 2.3898586866167912,
      "grad_norm": 1.4450799226760864,
      "learning_rate": 4.0759213078415074e-06,
      "loss": 0.3411,
      "step": 11500
    },
    {
      "epoch": 2.493765586034913,
      "grad_norm": 0.8265316486358643,
      "learning_rate": 3.3832086450540318e-06,
      "loss": 0.317,
      "step": 12000
    }
  ],
  "logging_steps": 500,
  "max_steps": 14436,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.461377145765888e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}