{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.39732994278448824,
  "eval_steps": 500,
  "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01589319771137953,
      "grad_norm": 0.9081700444221497,
      "learning_rate": 0.0002,
      "loss": 2.3142,
      "step": 100
    },
    {
      "epoch": 0.03178639542275906,
      "grad_norm": 0.8463758826255798,
      "learning_rate": 0.0002,
      "loss": 2.0015,
      "step": 200
    },
    {
      "epoch": 0.04767959313413859,
      "grad_norm": 1.0305041074752808,
      "learning_rate": 0.0002,
      "loss": 1.9429,
      "step": 300
    },
    {
      "epoch": 0.06357279084551812,
      "grad_norm": 0.48407527804374695,
      "learning_rate": 0.0002,
      "loss": 1.9406,
      "step": 400
    },
    {
      "epoch": 0.07946598855689765,
      "grad_norm": 0.9855284690856934,
      "learning_rate": 0.0002,
      "loss": 1.79,
      "step": 500
    },
    {
      "epoch": 0.09535918626827718,
      "grad_norm": 0.8263546228408813,
      "learning_rate": 0.0002,
      "loss": 1.7586,
      "step": 600
    },
    {
      "epoch": 0.11125238397965671,
      "grad_norm": 1.258915901184082,
      "learning_rate": 0.0002,
      "loss": 1.628,
      "step": 700
    },
    {
      "epoch": 0.12714558169103624,
      "grad_norm": 0.7160557508468628,
      "learning_rate": 0.0002,
      "loss": 1.4786,
      "step": 800
    },
    {
      "epoch": 0.14303877940241577,
      "grad_norm": 2.1789512634277344,
      "learning_rate": 0.0002,
      "loss": 1.4301,
      "step": 900
    },
    {
      "epoch": 0.1589319771137953,
      "grad_norm": 0.34466424584388733,
      "learning_rate": 0.0002,
      "loss": 1.5137,
      "step": 1000
    },
    {
      "epoch": 0.17482517482517482,
      "grad_norm": 1.9239227771759033,
      "learning_rate": 0.0002,
      "loss": 1.4945,
      "step": 1100
    },
    {
      "epoch": 0.19071837253655435,
      "grad_norm": 1.7776683568954468,
      "learning_rate": 0.0002,
      "loss": 1.3702,
      "step": 1200
    },
    {
      "epoch": 0.2066115702479339,
      "grad_norm": 1.5118937492370605,
      "learning_rate": 0.0002,
      "loss": 1.4384,
      "step": 1300
    },
    {
      "epoch": 0.22250476795931343,
      "grad_norm": 1.504011869430542,
      "learning_rate": 0.0002,
      "loss": 1.3728,
      "step": 1400
    },
    {
      "epoch": 0.23839796567069294,
      "grad_norm": 0.8850716352462769,
      "learning_rate": 0.0002,
      "loss": 1.4082,
      "step": 1500
    },
    {
      "epoch": 0.25429116338207247,
      "grad_norm": 0.7327104210853577,
      "learning_rate": 0.0002,
      "loss": 1.3342,
      "step": 1600
    },
    {
      "epoch": 0.270184361093452,
      "grad_norm": 0.9595212936401367,
      "learning_rate": 0.0002,
      "loss": 1.3882,
      "step": 1700
    },
    {
      "epoch": 0.28607755880483154,
      "grad_norm": 2.5830607414245605,
      "learning_rate": 0.0002,
      "loss": 1.2326,
      "step": 1800
    },
    {
      "epoch": 0.3019707565162111,
      "grad_norm": 1.852844476699829,
      "learning_rate": 0.0002,
      "loss": 1.3595,
      "step": 1900
    },
    {
      "epoch": 0.3178639542275906,
      "grad_norm": 1.6929068565368652,
      "learning_rate": 0.0002,
      "loss": 1.0967,
      "step": 2000
    },
    {
      "epoch": 0.3337571519389701,
      "grad_norm": 1.2370538711547852,
      "learning_rate": 0.0002,
      "loss": 1.3132,
      "step": 2100
    },
    {
      "epoch": 0.34965034965034963,
      "grad_norm": 1.0512717962265015,
      "learning_rate": 0.0002,
      "loss": 1.2205,
      "step": 2200
    },
    {
      "epoch": 0.36554354736172917,
      "grad_norm": 2.0815775394439697,
      "learning_rate": 0.0002,
      "loss": 1.2684,
      "step": 2300
    },
    {
      "epoch": 0.3814367450731087,
      "grad_norm": 1.7328189611434937,
      "learning_rate": 0.0002,
      "loss": 1.0058,
      "step": 2400
    },
    {
      "epoch": 0.39732994278448824,
      "grad_norm": 1.8121742010116577,
      "learning_rate": 0.0002,
      "loss": 1.1701,
      "step": 2500
    }
  ],
  "logging_steps": 100,
  "max_steps": 6292,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.275052425216e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}