{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 200,
  "global_step": 211,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04739336492890995,
      "grad_norm": 7.114734649658203,
      "learning_rate": 4.0909090909090915e-06,
      "loss": 0.8177,
      "step": 10
    },
    {
      "epoch": 0.0947867298578199,
      "grad_norm": 4.509153366088867,
      "learning_rate": 8.636363636363637e-06,
      "loss": 0.5856,
      "step": 20
    },
    {
      "epoch": 0.14218009478672985,
      "grad_norm": 3.8386244773864746,
      "learning_rate": 9.966191788709716e-06,
      "loss": 0.5105,
      "step": 30
    },
    {
      "epoch": 0.1895734597156398,
      "grad_norm": 3.2514610290527344,
      "learning_rate": 9.801700234118e-06,
      "loss": 0.4543,
      "step": 40
    },
    {
      "epoch": 0.23696682464454977,
      "grad_norm": 3.6072804927825928,
      "learning_rate": 9.504844339512096e-06,
      "loss": 0.4604,
      "step": 50
    },
    {
      "epoch": 0.2843601895734597,
      "grad_norm": 3.3668527603149414,
      "learning_rate": 9.083807275988285e-06,
      "loss": 0.4438,
      "step": 60
    },
    {
      "epoch": 0.33175355450236965,
      "grad_norm": 3.334531307220459,
      "learning_rate": 8.550195410107903e-06,
      "loss": 0.4286,
      "step": 70
    },
    {
      "epoch": 0.3791469194312796,
      "grad_norm": 3.738762378692627,
      "learning_rate": 7.918718361173951e-06,
      "loss": 0.4451,
      "step": 80
    },
    {
      "epoch": 0.4265402843601896,
      "grad_norm": 3.079043388366699,
      "learning_rate": 7.206783513808721e-06,
      "loss": 0.4271,
      "step": 90
    },
    {
      "epoch": 0.47393364928909953,
      "grad_norm": 3.276974678039551,
      "learning_rate": 6.434016163555452e-06,
      "loss": 0.4262,
      "step": 100
    },
    {
      "epoch": 0.5213270142180095,
      "grad_norm": 3.2487432956695557,
      "learning_rate": 5.621718523237427e-06,
      "loss": 0.4296,
      "step": 110
    },
    {
      "epoch": 0.5687203791469194,
      "grad_norm": 2.8787481784820557,
      "learning_rate": 4.792282503180867e-06,
      "loss": 0.3954,
      "step": 120
    },
    {
      "epoch": 0.6161137440758294,
      "grad_norm": 2.8975260257720947,
      "learning_rate": 3.968572452684113e-06,
      "loss": 0.3865,
      "step": 130
    },
    {
      "epoch": 0.6635071090047393,
      "grad_norm": 2.86442494392395,
      "learning_rate": 3.173294878168025e-06,
      "loss": 0.3983,
      "step": 140
    },
    {
      "epoch": 0.7109004739336493,
      "grad_norm": 3.0074281692504883,
      "learning_rate": 2.428372512445233e-06,
      "loss": 0.3896,
      "step": 150
    },
    {
      "epoch": 0.7582938388625592,
      "grad_norm": 2.670234441757202,
      "learning_rate": 1.7543399896022406e-06,
      "loss": 0.3797,
      "step": 160
    },
    {
      "epoch": 0.8056872037914692,
      "grad_norm": 3.0328829288482666,
      "learning_rate": 1.1697777844051105e-06,
      "loss": 0.3531,
      "step": 170
    },
    {
      "epoch": 0.8530805687203792,
      "grad_norm": 2.716613292694092,
      "learning_rate": 6.908000203341802e-07,
      "loss": 0.3598,
      "step": 180
    },
    {
      "epoch": 0.9004739336492891,
      "grad_norm": 2.890418529510498,
      "learning_rate": 3.306102654031823e-07,
      "loss": 0.36,
      "step": 190
    },
    {
      "epoch": 0.9478672985781991,
      "grad_norm": 2.654154062271118,
      "learning_rate": 9.913756075728088e-08,
      "loss": 0.3522,
      "step": 200
    },
    {
      "epoch": 0.9478672985781991,
      "eval_loss": 0.35650503635406494,
      "eval_runtime": 44.9859,
      "eval_samples_per_second": 16.672,
      "eval_steps_per_second": 2.09,
      "step": 200
    },
    {
      "epoch": 0.995260663507109,
      "grad_norm": 2.927879571914673,
      "learning_rate": 2.7627153366222014e-09,
      "loss": 0.3647,
      "step": 210
    },
    {
      "epoch": 1.0,
      "step": 211,
      "total_flos": 25695907840000.0,
      "train_loss": 0.4361029147254347,
      "train_runtime": 2869.1248,
      "train_samples_per_second": 2.351,
      "train_steps_per_second": 0.074
    }
  ],
  "logging_steps": 10,
  "max_steps": 211,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 25695907840000.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}