{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.2653061224489797,
  "eval_steps": 500,
  "global_step": 80,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2,
      "grad_norm": 5.09375,
      "learning_rate": 2.9968542393565676e-06,
      "loss": 1.9388,
      "step": 5
    },
    {
      "epoch": 0.41,
      "grad_norm": 4.4375,
      "learning_rate": 2.9616157869703894e-06,
      "loss": 1.8455,
      "step": 10
    },
    {
      "epoch": 0.61,
      "grad_norm": 3.59375,
      "learning_rate": 2.8881318444640566e-06,
      "loss": 1.7796,
      "step": 15
    },
    {
      "epoch": 0.82,
      "grad_norm": 3.796875,
      "learning_rate": 2.778325235483954e-06,
      "loss": 1.8091,
      "step": 20
    },
    {
      "epoch": 1.02,
      "grad_norm": 4.625,
      "learning_rate": 2.6350692237265428e-06,
      "loss": 1.7224,
      "step": 25
    },
    {
      "epoch": 1.22,
      "grad_norm": 3.953125,
      "learning_rate": 2.4621123294467098e-06,
      "loss": 1.7108,
      "step": 30
    },
    {
      "epoch": 1.43,
      "grad_norm": 4.125,
      "learning_rate": 2.2639802434931445e-06,
      "loss": 1.7299,
      "step": 35
    },
    {
      "epoch": 1.63,
      "grad_norm": 3.625,
      "learning_rate": 2.0458574054452316e-06,
      "loss": 1.7111,
      "step": 40
    },
    {
      "epoch": 1.84,
      "grad_norm": 3.53125,
      "learning_rate": 1.813451344546913e-06,
      "loss": 1.7364,
      "step": 45
    },
    {
      "epoch": 2.04,
      "grad_norm": 3.1875,
      "learning_rate": 1.5728433331716726e-06,
      "loss": 1.6664,
      "step": 50
    },
    {
      "epoch": 2.24,
      "grad_norm": 3.125,
      "learning_rate": 1.3303292607070737e-06,
      "loss": 1.6673,
      "step": 55
    },
    {
      "epoch": 2.45,
      "grad_norm": 3.65625,
      "learning_rate": 1.0922548916454855e-06,
      "loss": 1.6219,
      "step": 60
    },
    {
      "epoch": 2.65,
      "grad_norm": 2.8125,
      "learning_rate": 8.648498186137653e-07,
      "loss": 1.6648,
      "step": 65
    },
    {
      "epoch": 2.86,
      "grad_norm": 3.53125,
      "learning_rate": 6.540644552236401e-07,
      "loss": 1.699,
      "step": 70
    },
    {
      "epoch": 3.06,
      "grad_norm": 4.15625,
      "learning_rate": 4.6541433408284356e-07,
      "loss": 1.6821,
      "step": 75
    },
    {
      "epoch": 3.27,
      "grad_norm": 3.546875,
      "learning_rate": 3.0383578415591913e-07,
      "loss": 1.6633,
      "step": 80
    }
  ],
  "logging_steps": 5,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 20,
  "total_flos": 6434740059291648.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}