{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "global_step": 18960,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.26,
      "learning_rate": 1.947257383966245e-05,
      "loss": 0.3326,
      "step": 500
    },
    {
      "epoch": 0.53,
      "learning_rate": 1.8945147679324897e-05,
      "loss": 0.2716,
      "step": 1000
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.8417721518987345e-05,
      "loss": 0.2481,
      "step": 1500
    },
    {
      "epoch": 1.05,
      "learning_rate": 1.789029535864979e-05,
      "loss": 0.2281,
      "step": 2000
    },
    {
      "epoch": 1.32,
      "learning_rate": 1.7362869198312237e-05,
      "loss": 0.1909,
      "step": 2500
    },
    {
      "epoch": 1.58,
      "learning_rate": 1.6835443037974685e-05,
      "loss": 0.1923,
      "step": 3000
    },
    {
      "epoch": 1.85,
      "learning_rate": 1.6308016877637133e-05,
      "loss": 0.1883,
      "step": 3500
    },
    {
      "epoch": 2.11,
      "learning_rate": 1.578059071729958e-05,
      "loss": 0.1665,
      "step": 4000
    },
    {
      "epoch": 2.37,
      "learning_rate": 1.5253164556962025e-05,
      "loss": 0.1421,
      "step": 4500
    },
    {
      "epoch": 2.64,
      "learning_rate": 1.4725738396624474e-05,
      "loss": 0.1416,
      "step": 5000
    },
    {
      "epoch": 2.9,
      "learning_rate": 1.4198312236286922e-05,
      "loss": 0.1397,
      "step": 5500
    },
    {
      "epoch": 3.16,
      "learning_rate": 1.3670886075949368e-05,
      "loss": 0.1157,
      "step": 6000
    },
    {
      "epoch": 3.43,
      "learning_rate": 1.3143459915611816e-05,
      "loss": 0.0997,
      "step": 6500
    },
    {
      "epoch": 3.69,
      "learning_rate": 1.2616033755274262e-05,
      "loss": 0.1033,
      "step": 7000
    },
    {
      "epoch": 3.96,
      "learning_rate": 1.208860759493671e-05,
      "loss": 0.1023,
      "step": 7500
    },
    {
      "epoch": 4.22,
      "learning_rate": 1.1561181434599158e-05,
      "loss": 0.0776,
      "step": 8000
    },
    {
      "epoch": 4.48,
      "learning_rate": 1.1033755274261604e-05,
      "loss": 0.0728,
      "step": 8500
    },
    {
      "epoch": 4.75,
      "learning_rate": 1.0506329113924052e-05,
      "loss": 0.0792,
      "step": 9000
    },
    {
      "epoch": 5.01,
      "learning_rate": 9.9789029535865e-06,
      "loss": 0.0737,
      "step": 9500
    },
    {
      "epoch": 5.27,
      "learning_rate": 9.451476793248946e-06,
      "loss": 0.0529,
      "step": 10000
    },
    {
      "epoch": 5.54,
      "learning_rate": 8.924050632911393e-06,
      "loss": 0.0558,
      "step": 10500
    },
    {
      "epoch": 5.8,
      "learning_rate": 8.39662447257384e-06,
      "loss": 0.0559,
      "step": 11000
    },
    {
      "epoch": 6.07,
      "learning_rate": 7.869198312236287e-06,
      "loss": 0.0527,
      "step": 11500
    },
    {
      "epoch": 6.33,
      "learning_rate": 7.341772151898735e-06,
      "loss": 0.041,
      "step": 12000
    },
    {
      "epoch": 6.59,
      "learning_rate": 6.814345991561182e-06,
      "loss": 0.0407,
      "step": 12500
    },
    {
      "epoch": 6.86,
      "learning_rate": 6.286919831223629e-06,
      "loss": 0.0418,
      "step": 13000
    },
    {
      "epoch": 7.12,
      "learning_rate": 5.759493670886076e-06,
      "loss": 0.0365,
      "step": 13500
    },
    {
      "epoch": 7.38,
      "learning_rate": 5.2320675105485245e-06,
      "loss": 0.0303,
      "step": 14000
    },
    {
      "epoch": 7.65,
      "learning_rate": 4.7046413502109714e-06,
      "loss": 0.0306,
      "step": 14500
    },
    {
      "epoch": 7.91,
      "learning_rate": 4.177215189873418e-06,
      "loss": 0.031,
      "step": 15000
    },
    {
      "epoch": 8.18,
      "learning_rate": 3.649789029535865e-06,
      "loss": 0.0267,
      "step": 15500
    },
    {
      "epoch": 8.44,
      "learning_rate": 3.1223628691983127e-06,
      "loss": 0.0249,
      "step": 16000
    },
    {
      "epoch": 8.7,
      "learning_rate": 2.5949367088607596e-06,
      "loss": 0.0229,
      "step": 16500
    },
    {
      "epoch": 8.97,
      "learning_rate": 2.067510548523207e-06,
      "loss": 0.0232,
      "step": 17000
    },
    {
      "epoch": 9.23,
      "learning_rate": 1.5400843881856542e-06,
      "loss": 0.0178,
      "step": 17500
    },
    {
      "epoch": 9.49,
      "learning_rate": 1.0126582278481013e-06,
      "loss": 0.0179,
      "step": 18000
    },
    {
      "epoch": 9.76,
      "learning_rate": 4.852320675105486e-07,
      "loss": 0.0156,
      "step": 18500
    },
    {
      "epoch": 10.0,
      "step": 18960,
      "total_flos": 9.930007088618803e+17,
      "train_loss": 0.0949161008691989,
      "train_runtime": 15550.5586,
      "train_samples_per_second": 233.976,
      "train_steps_per_second": 1.219
    }
  ],
  "max_steps": 18960,
  "num_train_epochs": 10,
  "total_flos": 9.930007088618803e+17,
  "trial_name": null,
  "trial_params": null
}