{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.930524428507396,
  "eval_steps": 500,
  "global_step": 11000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.22,
      "grad_norm": 0.0,
      "learning_rate": 4.775885253249664e-05,
      "loss": 0.0,
      "step": 500
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.0,
      "learning_rate": 4.551770506499328e-05,
      "loss": 0.0,
      "step": 1000
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.0,
      "learning_rate": 4.327655759748992e-05,
      "loss": 0.0,
      "step": 1500
    },
    {
      "epoch": 0.9,
      "grad_norm": 0.0,
      "learning_rate": 4.1035410129986554e-05,
      "loss": 0.0,
      "step": 2000
    },
    {
      "epoch": 1.12,
      "grad_norm": 0.0,
      "learning_rate": 3.8794262662483196e-05,
      "loss": 0.0,
      "step": 2500
    },
    {
      "epoch": 1.34,
      "grad_norm": 0.0,
      "learning_rate": 3.655311519497983e-05,
      "loss": 0.0,
      "step": 3000
    },
    {
      "epoch": 1.57,
      "grad_norm": 0.0,
      "learning_rate": 3.4311967727476465e-05,
      "loss": 0.0,
      "step": 3500
    },
    {
      "epoch": 1.79,
      "grad_norm": 0.0,
      "learning_rate": 3.2070820259973107e-05,
      "loss": 0.0,
      "step": 4000
    },
    {
      "epoch": 2.02,
      "grad_norm": 0.0,
      "learning_rate": 2.9829672792469748e-05,
      "loss": 0.0,
      "step": 4500
    },
    {
      "epoch": 2.24,
      "grad_norm": 0.0,
      "learning_rate": 2.7588525324966386e-05,
      "loss": 0.0,
      "step": 5000
    },
    {
      "epoch": 2.47,
      "grad_norm": 0.0,
      "learning_rate": 2.5347377857463024e-05,
      "loss": 0.0,
      "step": 5500
    },
    {
      "epoch": 2.69,
      "grad_norm": 0.0,
      "learning_rate": 2.310623038995966e-05,
      "loss": 0.0,
      "step": 6000
    },
    {
      "epoch": 2.91,
      "grad_norm": 0.0,
      "learning_rate": 2.08650829224563e-05,
      "loss": 0.0,
      "step": 6500
    },
    {
      "epoch": 3.14,
      "grad_norm": 0.0,
      "learning_rate": 1.8623935454952938e-05,
      "loss": 0.0,
      "step": 7000
    },
    {
      "epoch": 3.36,
      "grad_norm": 0.0,
      "learning_rate": 1.6382787987449573e-05,
      "loss": 0.0,
      "step": 7500
    },
    {
      "epoch": 3.59,
      "grad_norm": 0.0,
      "learning_rate": 1.4141640519946214e-05,
      "loss": 0.0,
      "step": 8000
    },
    {
      "epoch": 3.81,
      "grad_norm": 0.0,
      "learning_rate": 1.1900493052442852e-05,
      "loss": 0.0,
      "step": 8500
    },
    {
      "epoch": 4.03,
      "grad_norm": 0.0,
      "learning_rate": 9.65934558493949e-06,
      "loss": 0.0,
      "step": 9000
    },
    {
      "epoch": 4.26,
      "grad_norm": 0.0,
      "learning_rate": 7.418198117436127e-06,
      "loss": 0.0,
      "step": 9500
    },
    {
      "epoch": 4.48,
      "grad_norm": 0.0,
      "learning_rate": 5.177050649932766e-06,
      "loss": 0.0,
      "step": 10000
    },
    {
      "epoch": 4.71,
      "grad_norm": 0.0,
      "learning_rate": 2.9359031824294042e-06,
      "loss": 0.0,
      "step": 10500
    },
    {
      "epoch": 4.93,
      "grad_norm": 0.0,
      "learning_rate": 6.947557149260422e-07,
      "loss": 0.0,
      "step": 11000
    }
  ],
  "logging_steps": 500,
  "max_steps": 11155,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 1.781601348825047e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}