{
  "best_metric": 1.9664812088012695,
  "best_model_checkpoint": "./output/checkpoint-150",
  "epoch": 0.03235722375020223,
  "eval_steps": 150,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0021571482500134824,
      "grad_norm": 46.34773635864258,
      "learning_rate": 2.82842712474619e-06,
      "loss": 18.4687,
      "step": 10
    },
    {
      "epoch": 0.004314296500026965,
      "grad_norm": 43.269718170166016,
      "learning_rate": 5.65685424949238e-06,
      "loss": 18.0905,
      "step": 20
    },
    {
      "epoch": 0.006471444750040446,
      "grad_norm": 42.11697769165039,
      "learning_rate": 8.48528137423857e-06,
      "loss": 17.6192,
      "step": 30
    },
    {
      "epoch": 0.00862859300005393,
      "grad_norm": 36.54969024658203,
      "learning_rate": 1.131370849898476e-05,
      "loss": 17.4029,
      "step": 40
    },
    {
      "epoch": 0.010785741250067411,
      "grad_norm": 35.50069808959961,
      "learning_rate": 1.414213562373095e-05,
      "loss": 6.7145,
      "step": 50
    },
    {
      "epoch": 0.012942889500080892,
      "grad_norm": 59.332000732421875,
      "learning_rate": 1.697056274847714e-05,
      "loss": 6.775,
      "step": 60
    },
    {
      "epoch": 0.015100037750094376,
      "grad_norm": 60.65806579589844,
      "learning_rate": 1.979898987322333e-05,
      "loss": 18.2243,
      "step": 70
    },
    {
      "epoch": 0.01725718600010786,
      "grad_norm": 39.01629638671875,
      "learning_rate": 2.262741699796952e-05,
      "loss": 16.7181,
      "step": 80
    },
    {
      "epoch": 0.01941433425012134,
      "grad_norm": 46.44580078125,
      "learning_rate": 2.545584412271571e-05,
      "loss": 16.5121,
      "step": 90
    },
    {
      "epoch": 0.021571482500134822,
      "grad_norm": 40.75702667236328,
      "learning_rate": 2.82842712474619e-05,
      "loss": 16.5158,
      "step": 100
    },
    {
      "epoch": 0.023728630750148305,
      "grad_norm": 43.35206604003906,
      "learning_rate": 3.111269837220809e-05,
      "loss": 16.0931,
      "step": 110
    },
    {
      "epoch": 0.025885779000161785,
      "grad_norm": 43.914920806884766,
      "learning_rate": 3.394112549695428e-05,
      "loss": 16.1211,
      "step": 120
    },
    {
      "epoch": 0.028042927250175268,
      "grad_norm": 42.83036422729492,
      "learning_rate": 3.676955262170047e-05,
      "loss": 16.039,
      "step": 130
    },
    {
      "epoch": 0.03020007550018875,
      "grad_norm": 48.97016906738281,
      "learning_rate": 3.959797974644666e-05,
      "loss": 15.4753,
      "step": 140
    },
    {
      "epoch": 0.03235722375020223,
      "grad_norm": 40.86469268798828,
      "learning_rate": 4.242640687119285e-05,
      "loss": 15.3947,
      "step": 150
    },
    {
      "epoch": 0.03235722375020223,
      "eval_loss": 1.9664812088012695,
      "eval_runtime": 55.7926,
      "eval_samples_per_second": 8.962,
      "eval_steps_per_second": 8.962,
      "step": 150
    }
  ],
  "logging_steps": 10,
  "max_steps": 50000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 11,
  "save_steps": 150,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.8441094036842086e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}