{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 95,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08421052631578947,
      "grad_norm": 64.44918823242188,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 5.9893,
      "step": 4
    },
    {
      "epoch": 0.16842105263157894,
      "grad_norm": 1.1460206508636475,
      "learning_rate": 3.888888888888889e-05,
      "loss": 1.7662,
      "step": 8
    },
    {
      "epoch": 0.25263157894736843,
      "grad_norm": 0.8948838710784912,
      "learning_rate": 6.111111111111112e-05,
      "loss": 0.8212,
      "step": 12
    },
    {
      "epoch": 0.3368421052631579,
      "grad_norm": 0.5042845606803894,
      "learning_rate": 8.333333333333334e-05,
      "loss": 0.533,
      "step": 16
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 0.5063032507896423,
      "learning_rate": 0.00010555555555555557,
      "loss": 0.425,
      "step": 20
    },
    {
      "epoch": 0.5052631578947369,
      "grad_norm": 0.4761255383491516,
      "learning_rate": 0.00012777777777777776,
      "loss": 0.3875,
      "step": 24
    },
    {
      "epoch": 0.5894736842105263,
      "grad_norm": 0.4021145701408386,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.3799,
      "step": 28
    },
    {
      "epoch": 0.6736842105263158,
      "grad_norm": 0.441383421421051,
      "learning_rate": 0.00017222222222222224,
      "loss": 0.3438,
      "step": 32
    },
    {
      "epoch": 0.7578947368421053,
      "grad_norm": 0.3923978805541992,
      "learning_rate": 0.00019444444444444446,
      "loss": 0.2952,
      "step": 36
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 0.39148256182670593,
      "learning_rate": 0.00019999007677495127,
      "loss": 0.2766,
      "step": 40
    },
    {
      "epoch": 0.9263157894736842,
      "grad_norm": 0.3753437101840973,
      "learning_rate": 0.0001999459775237086,
      "loss": 0.329,
      "step": 44
    },
    {
      "epoch": 1.0105263157894737,
      "grad_norm": 0.49228230118751526,
      "learning_rate": 0.00019986661520865405,
      "loss": 0.2808,
      "step": 48
    },
    {
      "epoch": 1.0947368421052632,
      "grad_norm": 0.3359147608280182,
      "learning_rate": 0.00019975201783049805,
      "loss": 0.2385,
      "step": 52
    },
    {
      "epoch": 1.1789473684210527,
      "grad_norm": 0.33956533670425415,
      "learning_rate": 0.00019960222582162976,
      "loss": 0.224,
      "step": 56
    },
    {
      "epoch": 1.263157894736842,
      "grad_norm": 0.29228124022483826,
      "learning_rate": 0.00019941729203185165,
      "loss": 0.2062,
      "step": 60
    },
    {
      "epoch": 1.3473684210526315,
      "grad_norm": 0.3463740944862366,
      "learning_rate": 0.00019919728170973296,
      "loss": 0.2335,
      "step": 64
    },
    {
      "epoch": 1.431578947368421,
      "grad_norm": 0.30409345030784607,
      "learning_rate": 0.00019894227247958845,
      "loss": 0.2242,
      "step": 68
    },
    {
      "epoch": 1.5157894736842106,
      "grad_norm": 0.3040483593940735,
      "learning_rate": 0.00019865235431409123,
      "loss": 0.1707,
      "step": 72
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.3167271614074707,
      "learning_rate": 0.00019832762950252813,
      "loss": 0.2288,
      "step": 76
    },
    {
      "epoch": 1.6842105263157894,
      "grad_norm": 0.4126874804496765,
      "learning_rate": 0.00019796821261471018,
      "loss": 0.202,
      "step": 80
    },
    {
      "epoch": 1.768421052631579,
      "grad_norm": 0.3122522830963135,
      "learning_rate": 0.00019757423046054968,
      "loss": 0.2209,
      "step": 84
    },
    {
      "epoch": 1.8526315789473684,
      "grad_norm": 0.348960280418396,
      "learning_rate": 0.00019714582204531918,
      "loss": 0.1551,
      "step": 88
    },
    {
      "epoch": 1.936842105263158,
      "grad_norm": 0.413482666015625,
      "learning_rate": 0.00019668313852060735,
      "loss": 0.1818,
      "step": 92
    }
  ],
  "logging_steps": 4,
  "max_steps": 705,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.812911896487526e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}