{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 50,
  "global_step": 477,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.12598425196850394,
      "grad_norm": 1.0565537214279175,
      "learning_rate": 7.6e-05,
      "loss": 2.6138,
      "step": 20
    },
    {
      "epoch": 0.25196850393700787,
      "grad_norm": 0.6730015873908997,
      "learning_rate": 0.00015600000000000002,
      "loss": 1.3505,
      "step": 40
    },
    {
      "epoch": 0.31496062992125984,
      "eval_loss": 0.9492911100387573,
      "eval_runtime": 46.3053,
      "eval_samples_per_second": 13.713,
      "eval_steps_per_second": 6.867,
      "step": 50
    },
    {
      "epoch": 0.3779527559055118,
      "grad_norm": 0.3552263081073761,
      "learning_rate": 0.00019578454332552694,
      "loss": 0.9638,
      "step": 60
    },
    {
      "epoch": 0.5039370078740157,
      "grad_norm": 0.41842517256736755,
      "learning_rate": 0.0001864168618266979,
      "loss": 0.8727,
      "step": 80
    },
    {
      "epoch": 0.6299212598425197,
      "grad_norm": 0.4583728313446045,
      "learning_rate": 0.00017704918032786885,
      "loss": 0.8165,
      "step": 100
    },
    {
      "epoch": 0.6299212598425197,
      "eval_loss": 0.7775229811668396,
      "eval_runtime": 45.4629,
      "eval_samples_per_second": 13.967,
      "eval_steps_per_second": 6.995,
      "step": 100
    },
    {
      "epoch": 0.7559055118110236,
      "grad_norm": 0.44533494114875793,
      "learning_rate": 0.00016768149882903982,
      "loss": 0.7824,
      "step": 120
    },
    {
      "epoch": 0.8818897637795275,
      "grad_norm": 0.5012836456298828,
      "learning_rate": 0.00015831381733021077,
      "loss": 0.7583,
      "step": 140
    },
    {
      "epoch": 0.9448818897637795,
      "eval_loss": 0.7103726863861084,
      "eval_runtime": 45.6673,
      "eval_samples_per_second": 13.905,
      "eval_steps_per_second": 6.963,
      "step": 150
    },
    {
      "epoch": 1.0062992125984251,
      "grad_norm": 0.5046865940093994,
      "learning_rate": 0.00014894613583138174,
      "loss": 0.733,
      "step": 160
    },
    {
      "epoch": 1.132283464566929,
      "grad_norm": 0.5601876378059387,
      "learning_rate": 0.0001395784543325527,
      "loss": 0.6408,
      "step": 180
    },
    {
      "epoch": 1.258267716535433,
      "grad_norm": 0.5865635275840759,
      "learning_rate": 0.00013021077283372365,
      "loss": 0.63,
      "step": 200
    },
    {
      "epoch": 1.258267716535433,
      "eval_loss": 0.6776148080825806,
      "eval_runtime": 45.8559,
      "eval_samples_per_second": 13.848,
      "eval_steps_per_second": 6.935,
      "step": 200
    },
    {
      "epoch": 1.384251968503937,
      "grad_norm": 0.6868230104446411,
      "learning_rate": 0.00012084309133489463,
      "loss": 0.6205,
      "step": 220
    },
    {
      "epoch": 1.510236220472441,
      "grad_norm": 0.6518662571907043,
      "learning_rate": 0.00011147540983606557,
      "loss": 0.6077,
      "step": 240
    },
    {
      "epoch": 1.573228346456693,
      "eval_loss": 0.6471548676490784,
      "eval_runtime": 45.8362,
      "eval_samples_per_second": 13.854,
      "eval_steps_per_second": 6.938,
      "step": 250
    },
    {
      "epoch": 1.6362204724409448,
      "grad_norm": 0.7308318018913269,
      "learning_rate": 0.00010210772833723654,
      "loss": 0.6222,
      "step": 260
    },
    {
      "epoch": 1.762204724409449,
      "grad_norm": 0.6749514937400818,
      "learning_rate": 9.27400468384075e-05,
      "loss": 0.6124,
      "step": 280
    },
    {
      "epoch": 1.8881889763779527,
      "grad_norm": 0.6839364767074585,
      "learning_rate": 8.337236533957846e-05,
      "loss": 0.5927,
      "step": 300
    },
    {
      "epoch": 1.8881889763779527,
      "eval_loss": 0.6262577772140503,
      "eval_runtime": 45.9433,
      "eval_samples_per_second": 13.821,
      "eval_steps_per_second": 6.922,
      "step": 300
    },
    {
      "epoch": 2.0125984251968503,
      "grad_norm": 0.691892147064209,
      "learning_rate": 7.400468384074943e-05,
      "loss": 0.576,
      "step": 320
    },
    {
      "epoch": 2.1385826771653544,
      "grad_norm": 0.7489628195762634,
      "learning_rate": 6.463700234192038e-05,
      "loss": 0.4875,
      "step": 340
    },
    {
      "epoch": 2.2015748031496063,
      "eval_loss": 0.6290721297264099,
      "eval_runtime": 45.8688,
      "eval_samples_per_second": 13.844,
      "eval_steps_per_second": 6.933,
      "step": 350
    },
    {
      "epoch": 2.264566929133858,
      "grad_norm": 0.8055089712142944,
      "learning_rate": 5.5269320843091335e-05,
      "loss": 0.4786,
      "step": 360
    },
    {
      "epoch": 2.3905511811023623,
      "grad_norm": 0.8845012784004211,
      "learning_rate": 4.59016393442623e-05,
      "loss": 0.4755,
      "step": 380
    },
    {
      "epoch": 2.516535433070866,
      "grad_norm": 0.8709272146224976,
      "learning_rate": 3.6533957845433256e-05,
      "loss": 0.4834,
      "step": 400
    },
    {
      "epoch": 2.516535433070866,
      "eval_loss": 0.6143574118614197,
      "eval_runtime": 46.2566,
      "eval_samples_per_second": 13.728,
      "eval_steps_per_second": 6.875,
      "step": 400
    },
    {
      "epoch": 2.64251968503937,
      "grad_norm": 0.8484326601028442,
      "learning_rate": 2.716627634660422e-05,
      "loss": 0.4714,
      "step": 420
    },
    {
      "epoch": 2.768503937007874,
      "grad_norm": 0.8441830277442932,
      "learning_rate": 1.7798594847775178e-05,
      "loss": 0.4635,
      "step": 440
    },
    {
      "epoch": 2.8314960629921258,
      "eval_loss": 0.6087988018989563,
      "eval_runtime": 46.2101,
      "eval_samples_per_second": 13.742,
      "eval_steps_per_second": 6.882,
      "step": 450
    },
    {
      "epoch": 2.894488188976378,
      "grad_norm": 0.7955852150917053,
      "learning_rate": 8.430913348946136e-06,
      "loss": 0.4659,
      "step": 460
    }
  ],
  "logging_steps": 20,
  "max_steps": 477,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.43734560692695e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}