{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.0,
  "eval_steps": 1000,
  "global_step": 15184,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00039541320680110717,
      "grad_norm": 6.574801445007324,
      "learning_rate": 0.0,
      "loss": 4.3129,
      "step": 1
    },
    {
      "epoch": 0.39541320680110714,
      "grad_norm": 6.2015180587768555,
      "learning_rate": 9.871541501976284e-06,
      "loss": 3.298,
      "step": 1000
    },
    {
      "epoch": 0.39541320680110714,
      "eval_cosine_accuracy": 0.9471027255058289,
      "eval_loss": 0.5520303249359131,
      "eval_runtime": 36.3857,
      "eval_samples_per_second": 261.339,
      "eval_steps_per_second": 1.044,
      "step": 1000
    },
    {
      "epoch": 0.7908264136022143,
      "grad_norm": 8.382867813110352,
      "learning_rate": 1.9752964426877474e-05,
      "loss": 1.9673,
      "step": 2000
    },
    {
      "epoch": 0.7908264136022143,
      "eval_cosine_accuracy": 0.953938364982605,
      "eval_loss": 0.4995275139808655,
      "eval_runtime": 34.9654,
      "eval_samples_per_second": 271.955,
      "eval_steps_per_second": 1.087,
      "step": 2000
    },
    {
      "epoch": 1.1860924535756618,
      "grad_norm": 6.899052619934082,
      "learning_rate": 1.8930140597539545e-05,
      "loss": 1.5646,
      "step": 3000
    },
    {
      "epoch": 1.1860924535756618,
      "eval_cosine_accuracy": 0.9593017101287842,
      "eval_loss": 0.4840952455997467,
      "eval_runtime": 34.4403,
      "eval_samples_per_second": 276.101,
      "eval_steps_per_second": 1.103,
      "step": 3000
    },
    {
      "epoch": 1.581193204267088,
      "grad_norm": 6.6706132888793945,
      "learning_rate": 1.7831722319859405e-05,
      "loss": 1.5836,
      "step": 4000
    },
    {
      "epoch": 1.581193204267088,
      "eval_cosine_accuracy": 0.962666928768158,
      "eval_loss": 0.4643610715866089,
      "eval_runtime": 34.4996,
      "eval_samples_per_second": 275.627,
      "eval_steps_per_second": 1.101,
      "step": 4000
    },
    {
      "epoch": 1.9762939549585146,
      "grad_norm": 6.573458194732666,
      "learning_rate": 1.6733304042179265e-05,
      "loss": 1.4401,
      "step": 5000
    },
    {
      "epoch": 1.9762939549585146,
      "eval_cosine_accuracy": 0.9638237357139587,
      "eval_loss": 0.44956663250923157,
      "eval_runtime": 34.8836,
      "eval_samples_per_second": 272.593,
      "eval_steps_per_second": 1.089,
      "step": 5000
    },
    {
      "epoch": 2.3713947056499407,
      "grad_norm": 5.309902191162109,
      "learning_rate": 1.5634885764499125e-05,
      "loss": 1.2966,
      "step": 6000
    },
    {
      "epoch": 2.3713947056499407,
      "eval_cosine_accuracy": 0.9671889543533325,
      "eval_loss": 0.4553391933441162,
      "eval_runtime": 34.4231,
      "eval_samples_per_second": 276.239,
      "eval_steps_per_second": 1.104,
      "step": 6000
    },
    {
      "epoch": 2.7664954563413673,
      "grad_norm": 5.066314220428467,
      "learning_rate": 1.4538664323374343e-05,
      "loss": 1.2287,
      "step": 7000
    },
    {
      "epoch": 2.7664954563413673,
      "eval_cosine_accuracy": 0.9656115174293518,
      "eval_loss": 0.443572074174881,
      "eval_runtime": 34.6972,
      "eval_samples_per_second": 274.057,
      "eval_steps_per_second": 1.095,
      "step": 7000
    },
    {
      "epoch": 3.1615962070327934,
      "grad_norm": 5.241501331329346,
      "learning_rate": 1.34402460456942e-05,
      "loss": 1.1559,
      "step": 8000
    },
    {
      "epoch": 3.1615962070327934,
      "eval_cosine_accuracy": 0.9663476943969727,
      "eval_loss": 0.44341471791267395,
      "eval_runtime": 34.4306,
      "eval_samples_per_second": 276.179,
      "eval_steps_per_second": 1.104,
      "step": 8000
    },
    {
      "epoch": 3.5566969577242196,
      "grad_norm": 4.690669059753418,
      "learning_rate": 1.234182776801406e-05,
      "loss": 1.1011,
      "step": 9000
    },
    {
      "epoch": 3.5566969577242196,
      "eval_cosine_accuracy": 0.9673992991447449,
      "eval_loss": 0.4327247738838196,
      "eval_runtime": 34.3296,
      "eval_samples_per_second": 276.991,
      "eval_steps_per_second": 1.107,
      "step": 9000
    },
    {
      "epoch": 3.951797708415646,
      "grad_norm": 8.417939186096191,
      "learning_rate": 1.124340949033392e-05,
      "loss": 1.0585,
      "step": 10000
    },
    {
      "epoch": 3.951797708415646,
      "eval_cosine_accuracy": 0.9675044417381287,
      "eval_loss": 0.43402087688446045,
      "eval_runtime": 34.3458,
      "eval_samples_per_second": 276.86,
      "eval_steps_per_second": 1.106,
      "step": 10000
    },
    {
      "epoch": 4.346898459107072,
      "grad_norm": 5.533106327056885,
      "learning_rate": 1.014608963093146e-05,
      "loss": 1.0006,
      "step": 11000
    },
    {
      "epoch": 4.346898459107072,
      "eval_cosine_accuracy": 0.9684509634971619,
      "eval_loss": 0.42998796701431274,
      "eval_runtime": 35.621,
      "eval_samples_per_second": 266.95,
      "eval_steps_per_second": 1.067,
      "step": 11000
    },
    {
      "epoch": 4.741999209798498,
      "grad_norm": 5.348298072814941,
      "learning_rate": 9.047671353251318e-06,
      "loss": 0.9839,
      "step": 12000
    },
    {
      "epoch": 4.741999209798498,
      "eval_cosine_accuracy": 0.9686612486839294,
      "eval_loss": 0.42643481492996216,
      "eval_runtime": 34.294,
      "eval_samples_per_second": 277.279,
      "eval_steps_per_second": 1.108,
      "step": 12000
    },
    {
      "epoch": 5.137099960489925,
      "grad_norm": 5.4071149826049805,
      "learning_rate": 7.950351493848859e-06,
      "loss": 0.9693,
      "step": 13000
    },
    {
      "epoch": 5.137099960489925,
      "eval_cosine_accuracy": 0.9684509634971619,
      "eval_loss": 0.4284929931163788,
      "eval_runtime": 34.4423,
      "eval_samples_per_second": 276.085,
      "eval_steps_per_second": 1.103,
      "step": 13000
    },
    {
      "epoch": 5.532200711181352,
      "grad_norm": 7.151297092437744,
      "learning_rate": 6.853031634446398e-06,
      "loss": 0.9279,
      "step": 14000
    },
    {
      "epoch": 5.532200711181352,
      "eval_cosine_accuracy": 0.9680302739143372,
      "eval_loss": 0.43396368622779846,
      "eval_runtime": 34.0774,
      "eval_samples_per_second": 279.041,
      "eval_steps_per_second": 1.115,
      "step": 14000
    },
    {
      "epoch": 5.927301461872777,
      "grad_norm": 4.7381510734558105,
      "learning_rate": 5.755711775043937e-06,
      "loss": 0.9175,
      "step": 15000
    },
    {
      "epoch": 5.927301461872777,
      "eval_cosine_accuracy": 0.9693974256515503,
      "eval_loss": 0.42655229568481445,
      "eval_runtime": 34.3744,
      "eval_samples_per_second": 276.63,
      "eval_steps_per_second": 1.105,
      "step": 15000
    }
  ],
  "logging_steps": 1000,
  "max_steps": 20232,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}