{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 222,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13513513513513514,
      "grad_norm": 0.932229220867157,
      "learning_rate": 2.173913043478261e-05,
      "loss": 1.3895,
      "step": 10
    },
    {
      "epoch": 0.2702702702702703,
      "grad_norm": 0.8374117612838745,
      "learning_rate": 4.347826086956522e-05,
      "loss": 1.2257,
      "step": 20
    },
    {
      "epoch": 0.40540540540540543,
      "grad_norm": 0.7463754415512085,
      "learning_rate": 4.98475042744222e-05,
      "loss": 1.0389,
      "step": 30
    },
    {
      "epoch": 0.5405405405405406,
      "grad_norm": 1.3753951787948608,
      "learning_rate": 4.910506156279029e-05,
      "loss": 0.9585,
      "step": 40
    },
    {
      "epoch": 0.6756756756756757,
      "grad_norm": 0.8859611749649048,
      "learning_rate": 4.7763104379936555e-05,
      "loss": 0.9636,
      "step": 50
    },
    {
      "epoch": 0.8108108108108109,
      "grad_norm": 0.6303398609161377,
      "learning_rate": 4.585500840294794e-05,
      "loss": 0.9832,
      "step": 60
    },
    {
      "epoch": 0.9459459459459459,
      "grad_norm": 1.2979140281677246,
      "learning_rate": 4.342822968779448e-05,
      "loss": 1.0458,
      "step": 70
    },
    {
      "epoch": 1.0810810810810811,
      "grad_norm": 1.2470672130584717,
      "learning_rate": 4.054312439471239e-05,
      "loss": 0.7874,
      "step": 80
    },
    {
      "epoch": 1.2162162162162162,
      "grad_norm": 0.7219607830047607,
      "learning_rate": 3.727144767643984e-05,
      "loss": 0.7658,
      "step": 90
    },
    {
      "epoch": 1.3513513513513513,
      "grad_norm": 0.4544905424118042,
      "learning_rate": 3.369456906329956e-05,
      "loss": 0.7855,
      "step": 100
    },
    {
      "epoch": 1.4864864864864864,
      "grad_norm": 1.0317392349243164,
      "learning_rate": 2.990144873009946e-05,
      "loss": 0.6696,
      "step": 110
    },
    {
      "epoch": 1.6216216216216215,
      "grad_norm": 1.0412318706512451,
      "learning_rate": 2.5986424976906322e-05,
      "loss": 0.6173,
      "step": 120
    },
    {
      "epoch": 1.7567567567567568,
      "grad_norm": 1.030823826789856,
      "learning_rate": 2.2046867951027303e-05,
      "loss": 0.6786,
      "step": 130
    },
    {
      "epoch": 1.8918918918918919,
      "grad_norm": 0.678756594657898,
      "learning_rate": 1.8180757964234924e-05,
      "loss": 0.6978,
      "step": 140
    },
    {
      "epoch": 2.027027027027027,
      "grad_norm": 0.8387508392333984,
      "learning_rate": 1.4484248634655401e-05,
      "loss": 0.6127,
      "step": 150
    },
    {
      "epoch": 2.1621621621621623,
      "grad_norm": 1.1406726837158203,
      "learning_rate": 1.1049275460163999e-05,
      "loss": 0.5264,
      "step": 160
    },
    {
      "epoch": 2.2972972972972974,
      "grad_norm": 0.9228270053863525,
      "learning_rate": 7.961269300209159e-06,
      "loss": 0.4355,
      "step": 170
    },
    {
      "epoch": 2.4324324324324325,
      "grad_norm": 0.8361117839813232,
      "learning_rate": 5.297031633820193e-06,
      "loss": 0.5041,
      "step": 180
    },
    {
      "epoch": 2.5675675675675675,
      "grad_norm": 0.9173290133476257,
      "learning_rate": 3.1228244380351602e-06,
      "loss": 0.4636,
      "step": 190
    },
    {
      "epoch": 2.7027027027027026,
      "grad_norm": 0.5897024869918823,
      "learning_rate": 1.4927221931831131e-06,
      "loss": 0.448,
      "step": 200
    },
    {
      "epoch": 2.8378378378378377,
      "grad_norm": 1.1036897897720337,
      "learning_rate": 4.472670021254899e-07,
      "loss": 0.4568,
      "step": 210
    },
    {
      "epoch": 2.972972972972973,
      "grad_norm": 2.8205678462982178,
      "learning_rate": 1.2460271845654569e-08,
      "loss": 0.3889,
      "step": 220
    },
    {
      "epoch": 3.0,
      "step": 222,
      "total_flos": 1.6682563778052096e+16,
      "train_loss": 0.7447243598667351,
      "train_runtime": 1442.9511,
      "train_samples_per_second": 1.229,
      "train_steps_per_second": 0.154
    }
  ],
  "logging_steps": 10,
  "max_steps": 222,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "total_flos": 1.6682563778052096e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}