{
  "best_metric": 0.6810389161109924,
  "best_model_checkpoint": "YELP_full/checkpoint-10158",
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 13544,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15,
      "grad_norm": 8.398588180541992,
      "learning_rate": 5.8227997637330186e-05,
      "loss": 0.9245,
      "step": 500
    },
    {
      "epoch": 0.3,
      "grad_norm": 3.5803122520446777,
      "learning_rate": 5.645599527466036e-05,
      "loss": 0.7913,
      "step": 1000
    },
    {
      "epoch": 0.44,
      "grad_norm": 3.3800671100616455,
      "learning_rate": 5.468399291199055e-05,
      "loss": 0.7613,
      "step": 1500
    },
    {
      "epoch": 0.59,
      "grad_norm": 1.9862334728240967,
      "learning_rate": 5.291199054932074e-05,
      "loss": 0.7362,
      "step": 2000
    },
    {
      "epoch": 0.74,
      "grad_norm": 2.043750524520874,
      "learning_rate": 5.113998818665092e-05,
      "loss": 0.7262,
      "step": 2500
    },
    {
      "epoch": 0.89,
      "grad_norm": 2.8348655700683594,
      "learning_rate": 4.93679858239811e-05,
      "loss": 0.7165,
      "step": 3000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.69042,
      "eval_loss": 0.6984841227531433,
      "eval_runtime": 141.1206,
      "eval_samples_per_second": 354.307,
      "eval_steps_per_second": 1.849,
      "step": 3386
    },
    {
      "epoch": 1.03,
      "grad_norm": 2.2309489250183105,
      "learning_rate": 4.7595983461311283e-05,
      "loss": 0.7047,
      "step": 3500
    },
    {
      "epoch": 1.18,
      "grad_norm": 2.665053129196167,
      "learning_rate": 4.582398109864147e-05,
      "loss": 0.6764,
      "step": 4000
    },
    {
      "epoch": 1.33,
      "grad_norm": 2.590282678604126,
      "learning_rate": 4.4051978735971645e-05,
      "loss": 0.6733,
      "step": 4500
    },
    {
      "epoch": 1.48,
      "grad_norm": 2.890204429626465,
      "learning_rate": 4.227997637330183e-05,
      "loss": 0.6712,
      "step": 5000
    },
    {
      "epoch": 1.62,
      "grad_norm": 2.0544567108154297,
      "learning_rate": 4.050797401063202e-05,
      "loss": 0.6692,
      "step": 5500
    },
    {
      "epoch": 1.77,
      "grad_norm": 1.7416001558303833,
      "learning_rate": 3.87359716479622e-05,
      "loss": 0.6693,
      "step": 6000
    },
    {
      "epoch": 1.92,
      "grad_norm": 2.1172029972076416,
      "learning_rate": 3.696396928529238e-05,
      "loss": 0.6641,
      "step": 6500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.69884,
      "eval_loss": 0.6872273683547974,
      "eval_runtime": 141.4186,
      "eval_samples_per_second": 353.56,
      "eval_steps_per_second": 1.846,
      "step": 6772
    },
    {
      "epoch": 2.07,
      "grad_norm": 2.3501951694488525,
      "learning_rate": 3.5191966922622565e-05,
      "loss": 0.6458,
      "step": 7000
    },
    {
      "epoch": 2.22,
      "grad_norm": 1.6436865329742432,
      "learning_rate": 3.341996455995275e-05,
      "loss": 0.624,
      "step": 7500
    },
    {
      "epoch": 2.36,
      "grad_norm": 1.550398588180542,
      "learning_rate": 3.164796219728293e-05,
      "loss": 0.6243,
      "step": 8000
    },
    {
      "epoch": 2.51,
      "grad_norm": 3.080960512161255,
      "learning_rate": 2.9875959834613114e-05,
      "loss": 0.6246,
      "step": 8500
    },
    {
      "epoch": 2.66,
      "grad_norm": 2.6615045070648193,
      "learning_rate": 2.81039574719433e-05,
      "loss": 0.6246,
      "step": 9000
    },
    {
      "epoch": 2.81,
      "grad_norm": 1.667477011680603,
      "learning_rate": 2.633195510927348e-05,
      "loss": 0.622,
      "step": 9500
    },
    {
      "epoch": 2.95,
      "grad_norm": 2.1684648990631104,
      "learning_rate": 2.4559952746603663e-05,
      "loss": 0.62,
      "step": 10000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.70262,
      "eval_loss": 0.6810389161109924,
      "eval_runtime": 141.0454,
      "eval_samples_per_second": 354.496,
      "eval_steps_per_second": 1.85,
      "step": 10158
    },
    {
      "epoch": 3.1,
      "grad_norm": 2.221029043197632,
      "learning_rate": 2.2787950383933847e-05,
      "loss": 0.5931,
      "step": 10500
    },
    {
      "epoch": 3.25,
      "grad_norm": 2.1362805366516113,
      "learning_rate": 2.1015948021264028e-05,
      "loss": 0.5843,
      "step": 11000
    },
    {
      "epoch": 3.4,
      "grad_norm": 2.233245849609375,
      "learning_rate": 1.9243945658594212e-05,
      "loss": 0.5826,
      "step": 11500
    },
    {
      "epoch": 3.54,
      "grad_norm": 2.0913615226745605,
      "learning_rate": 1.7471943295924396e-05,
      "loss": 0.5824,
      "step": 12000
    },
    {
      "epoch": 3.69,
      "grad_norm": 2.6865012645721436,
      "learning_rate": 1.5699940933254577e-05,
      "loss": 0.5758,
      "step": 12500
    },
    {
      "epoch": 3.84,
      "grad_norm": 2.226073741912842,
      "learning_rate": 1.3927938570584761e-05,
      "loss": 0.5822,
      "step": 13000
    },
    {
      "epoch": 3.99,
      "grad_norm": 3.137810468673706,
      "learning_rate": 1.2155936207914945e-05,
      "loss": 0.5852,
      "step": 13500
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.70036,
      "eval_loss": 0.6996423602104187,
      "eval_runtime": 141.3356,
      "eval_samples_per_second": 353.768,
      "eval_steps_per_second": 1.847,
      "step": 13544
    }
  ],
  "logging_steps": 500,
  "max_steps": 16930,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 1.247513087841362e+18,
  "train_batch_size": 192,
  "trial_name": null,
  "trial_params": null
}