{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 18.0,
  "eval_steps": 500,
  "global_step": 11952,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.7530120481927711,
      "grad_norm": 0.2647170126438141,
      "learning_rate": 0.0004811746987951807,
      "loss": 0.3311,
      "step": 500
    },
    {
      "epoch": 1.5060240963855422,
      "grad_norm": 0.22880347073078156,
      "learning_rate": 0.00046234939759036143,
      "loss": 0.0907,
      "step": 1000
    },
    {
      "epoch": 2.2590361445783134,
      "grad_norm": 0.1677163541316986,
      "learning_rate": 0.00044352409638554217,
      "loss": 0.0568,
      "step": 1500
    },
    {
      "epoch": 3.0120481927710845,
      "grad_norm": 0.12338300049304962,
      "learning_rate": 0.0004246987951807229,
      "loss": 0.0451,
      "step": 2000
    },
    {
      "epoch": 3.765060240963855,
      "grad_norm": 0.08597979694604874,
      "learning_rate": 0.0004058734939759036,
      "loss": 0.0386,
      "step": 2500
    },
    {
      "epoch": 4.518072289156627,
      "grad_norm": 0.0988745242357254,
      "learning_rate": 0.00038704819277108433,
      "loss": 0.0352,
      "step": 3000
    },
    {
      "epoch": 5.271084337349397,
      "grad_norm": 0.11785969883203506,
      "learning_rate": 0.00036822289156626507,
      "loss": 0.0331,
      "step": 3500
    },
    {
      "epoch": 6.024096385542169,
      "grad_norm": 0.09906379133462906,
      "learning_rate": 0.0003493975903614458,
      "loss": 0.0315,
      "step": 4000
    },
    {
      "epoch": 6.77710843373494,
      "grad_norm": 0.1129639744758606,
      "learning_rate": 0.0003305722891566265,
      "loss": 0.0301,
      "step": 4500
    },
    {
      "epoch": 7.530120481927711,
      "grad_norm": 0.07321502268314362,
      "learning_rate": 0.00031174698795180723,
      "loss": 0.0292,
      "step": 5000
    },
    {
      "epoch": 8.283132530120483,
      "grad_norm": 0.05083702132105827,
      "learning_rate": 0.0002929216867469879,
      "loss": 0.028,
      "step": 5500
    },
    {
      "epoch": 9.036144578313253,
      "grad_norm": 0.073179692029953,
      "learning_rate": 0.0002740963855421687,
      "loss": 0.0275,
      "step": 6000
    },
    {
      "epoch": 9.789156626506024,
      "grad_norm": 0.060432616621255875,
      "learning_rate": 0.0002552710843373494,
      "loss": 0.0266,
      "step": 6500
    },
    {
      "epoch": 10.542168674698795,
      "grad_norm": 0.05641400068998337,
      "learning_rate": 0.00023644578313253013,
      "loss": 0.0265,
      "step": 7000
    },
    {
      "epoch": 11.295180722891565,
      "grad_norm": 0.055228352546691895,
      "learning_rate": 0.00021762048192771087,
      "loss": 0.0257,
      "step": 7500
    },
    {
      "epoch": 12.048192771084338,
      "grad_norm": 0.055986884981393814,
      "learning_rate": 0.00019879518072289158,
      "loss": 0.0254,
      "step": 8000
    },
    {
      "epoch": 12.801204819277109,
      "grad_norm": 0.06879087537527084,
      "learning_rate": 0.0001799698795180723,
      "loss": 0.025,
      "step": 8500
    },
    {
      "epoch": 13.55421686746988,
      "grad_norm": 0.08162941783666611,
      "learning_rate": 0.00016114457831325303,
      "loss": 0.0248,
      "step": 9000
    },
    {
      "epoch": 14.30722891566265,
      "grad_norm": 0.0502689927816391,
      "learning_rate": 0.00014231927710843374,
      "loss": 0.0242,
      "step": 9500
    },
    {
      "epoch": 15.060240963855422,
      "grad_norm": 0.052483588457107544,
      "learning_rate": 0.00012349397590361445,
      "loss": 0.0245,
      "step": 10000
    },
    {
      "epoch": 15.813253012048193,
      "grad_norm": 0.04214683175086975,
      "learning_rate": 0.00010466867469879517,
      "loss": 0.0238,
      "step": 10500
    },
    {
      "epoch": 16.566265060240966,
      "grad_norm": 0.03767360374331474,
      "learning_rate": 8.58433734939759e-05,
      "loss": 0.0239,
      "step": 11000
    },
    {
      "epoch": 17.319277108433734,
      "grad_norm": 0.04902500659227371,
      "learning_rate": 6.701807228915662e-05,
      "loss": 0.0234,
      "step": 11500
    }
  ],
  "logging_steps": 500,
  "max_steps": 13280,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6462198830333952.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}