{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.4336917562724014,
  "eval_steps": 2000,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "grad_norm": 2.5333876609802246,
      "learning_rate": 9.9e-07,
      "loss": 0.9981,
      "step": 100
    },
    {
      "epoch": 0.14,
      "grad_norm": 2.1556735038757324,
      "learning_rate": 9.9e-07,
      "loss": 0.7461,
      "step": 200
    },
    {
      "epoch": 0.22,
      "grad_norm": 2.5188281536102295,
      "learning_rate": 9.7989898989899e-07,
      "loss": 0.6707,
      "step": 300
    },
    {
      "epoch": 0.29,
      "grad_norm": 2.559631824493408,
      "learning_rate": 9.697979797979798e-07,
      "loss": 0.6281,
      "step": 400
    },
    {
      "epoch": 0.36,
      "grad_norm": 3.3940675258636475,
      "learning_rate": 9.596969696969696e-07,
      "loss": 0.6437,
      "step": 500
    },
    {
      "epoch": 0.43,
      "grad_norm": 2.0749378204345703,
      "learning_rate": 9.495959595959595e-07,
      "loss": 0.6319,
      "step": 600
    },
    {
      "epoch": 0.5,
      "grad_norm": 2.3027114868164062,
      "learning_rate": 9.394949494949495e-07,
      "loss": 0.6477,
      "step": 700
    },
    {
      "epoch": 0.57,
      "grad_norm": 1.6429717540740967,
      "learning_rate": 9.293939393939394e-07,
      "loss": 0.6564,
      "step": 800
    },
    {
      "epoch": 0.65,
      "grad_norm": 2.391716718673706,
      "learning_rate": 9.192929292929292e-07,
      "loss": 0.6263,
      "step": 900
    },
    {
      "epoch": 0.72,
      "grad_norm": 2.9794795513153076,
      "learning_rate": 9.091919191919192e-07,
      "loss": 0.6422,
      "step": 1000
    },
    {
      "epoch": 0.79,
      "grad_norm": 2.853337049484253,
      "learning_rate": 8.99090909090909e-07,
      "loss": 0.6043,
      "step": 1100
    },
    {
      "epoch": 0.86,
      "grad_norm": 2.453305244445801,
      "learning_rate": 8.88989898989899e-07,
      "loss": 0.5914,
      "step": 1200
    },
    {
      "epoch": 0.93,
      "grad_norm": 2.768860340118408,
      "learning_rate": 8.788888888888889e-07,
      "loss": 0.6194,
      "step": 1300
    },
    {
      "epoch": 1.0,
      "grad_norm": 4.065279483795166,
      "learning_rate": 8.687878787878787e-07,
      "loss": 0.6038,
      "step": 1400
    },
    {
      "epoch": 1.08,
      "grad_norm": 3.8275232315063477,
      "learning_rate": 8.586868686868687e-07,
      "loss": 0.6038,
      "step": 1500
    },
    {
      "epoch": 1.15,
      "grad_norm": 2.6090586185455322,
      "learning_rate": 8.485858585858586e-07,
      "loss": 0.592,
      "step": 1600
    },
    {
      "epoch": 1.22,
      "grad_norm": 1.9870526790618896,
      "learning_rate": 8.384848484848484e-07,
      "loss": 0.5885,
      "step": 1700
    },
    {
      "epoch": 1.29,
      "grad_norm": 2.2942471504211426,
      "learning_rate": 8.283838383838383e-07,
      "loss": 0.622,
      "step": 1800
    },
    {
      "epoch": 1.36,
      "grad_norm": 2.0904691219329834,
      "learning_rate": 8.182828282828283e-07,
      "loss": 0.5857,
      "step": 1900
    },
    {
      "epoch": 1.43,
      "grad_norm": 2.909604072570801,
      "learning_rate": 8.081818181818182e-07,
      "loss": 0.5841,
      "step": 2000
    },
    {
      "epoch": 1.43,
      "eval_loss": 0.7236512303352356,
      "eval_runtime": 192.8239,
      "eval_samples_per_second": 5.186,
      "eval_steps_per_second": 0.648,
      "step": 2000
    }
  ],
  "logging_steps": 100,
  "max_steps": 10000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 2000,
  "total_flos": 3.769869715384566e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}