{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 24750,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04040404040404041,
      "grad_norm": 1.265625,
      "learning_rate": 1.9587628865979382e-05,
      "loss": 0.1646,
      "step": 1000
    },
    {
      "epoch": 0.08080808080808081,
      "grad_norm": 1.28125,
      "learning_rate": 1.8762886597938147e-05,
      "loss": 0.1487,
      "step": 2000
    },
    {
      "epoch": 0.12121212121212122,
      "grad_norm": 1.1875,
      "learning_rate": 1.793814432989691e-05,
      "loss": 0.1487,
      "step": 3000
    },
    {
      "epoch": 0.16161616161616163,
      "grad_norm": 1.1953125,
      "learning_rate": 1.7113402061855672e-05,
      "loss": 0.1491,
      "step": 4000
    },
    {
      "epoch": 0.20202020202020202,
      "grad_norm": 1.1171875,
      "learning_rate": 1.6288659793814433e-05,
      "loss": 0.1462,
      "step": 5000
    },
    {
      "epoch": 0.24242424242424243,
      "grad_norm": 1.1484375,
      "learning_rate": 1.5463917525773197e-05,
      "loss": 0.1452,
      "step": 6000
    },
    {
      "epoch": 0.2828282828282828,
      "grad_norm": 1.5234375,
      "learning_rate": 1.4639175257731958e-05,
      "loss": 0.1459,
      "step": 7000
    },
    {
      "epoch": 0.32323232323232326,
      "grad_norm": 1.0703125,
      "learning_rate": 1.3814432989690723e-05,
      "loss": 0.1454,
      "step": 8000
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 1.28125,
      "learning_rate": 1.2989690721649485e-05,
      "loss": 0.1455,
      "step": 9000
    },
    {
      "epoch": 0.40404040404040403,
      "grad_norm": 1.515625,
      "learning_rate": 1.2164948453608248e-05,
      "loss": 0.1462,
      "step": 10000
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 0.95703125,
      "learning_rate": 1.134020618556701e-05,
      "loss": 0.1475,
      "step": 11000
    },
    {
      "epoch": 0.48484848484848486,
      "grad_norm": 1.21875,
      "learning_rate": 1.0515463917525775e-05,
      "loss": 0.1456,
      "step": 12000
    },
    {
      "epoch": 0.5252525252525253,
      "grad_norm": 0.9375,
      "learning_rate": 9.690721649484536e-06,
      "loss": 0.1448,
      "step": 13000
    },
    {
      "epoch": 0.5656565656565656,
      "grad_norm": 1.2421875,
      "learning_rate": 8.865979381443299e-06,
      "loss": 0.1463,
      "step": 14000
    },
    {
      "epoch": 0.6060606060606061,
      "grad_norm": 1.1171875,
      "learning_rate": 8.041237113402063e-06,
      "loss": 0.1455,
      "step": 15000
    },
    {
      "epoch": 0.6464646464646465,
      "grad_norm": 1.0234375,
      "learning_rate": 7.216494845360825e-06,
      "loss": 0.1477,
      "step": 16000
    },
    {
      "epoch": 0.6868686868686869,
      "grad_norm": 0.97265625,
      "learning_rate": 6.391752577319588e-06,
      "loss": 0.1468,
      "step": 17000
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 1.1015625,
      "learning_rate": 5.567010309278351e-06,
      "loss": 0.1429,
      "step": 18000
    },
    {
      "epoch": 0.7676767676767676,
      "grad_norm": 1.375,
      "learning_rate": 4.742268041237113e-06,
      "loss": 0.1475,
      "step": 19000
    },
    {
      "epoch": 0.8080808080808081,
      "grad_norm": 1.6328125,
      "learning_rate": 3.917525773195877e-06,
      "loss": 0.1468,
      "step": 20000
    },
    {
      "epoch": 0.8484848484848485,
      "grad_norm": 1.3828125,
      "learning_rate": 3.0927835051546395e-06,
      "loss": 0.1423,
      "step": 21000
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.99609375,
      "learning_rate": 2.268041237113402e-06,
      "loss": 0.144,
      "step": 22000
    },
    {
      "epoch": 0.9292929292929293,
      "grad_norm": 1.3125,
      "learning_rate": 1.4432989690721649e-06,
      "loss": 0.1447,
      "step": 23000
    },
    {
      "epoch": 0.9696969696969697,
      "grad_norm": 1.140625,
      "learning_rate": 6.185567010309279e-07,
      "loss": 0.1453,
      "step": 24000
    }
  ],
  "logging_steps": 1000,
  "max_steps": 24750,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0760739618816e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}