{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9992093299070963,
  "eval_steps": 1000,
  "global_step": 15171,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00019774569903104609,
      "grad_norm": 5.842831611633301,
      "learning_rate": 0.0,
      "loss": 0.8464,
      "step": 1
    },
    {
      "epoch": 0.19774569903104608,
      "grad_norm": 6.009142875671387,
      "learning_rate": 3.290513833992095e-05,
      "loss": 0.8746,
      "step": 1000
    },
    {
      "epoch": 0.19774569903104608,
      "eval_cosine_accuracy": 0.9700284004211426,
      "eval_loss": 0.40132495760917664,
      "eval_runtime": 35.9259,
      "eval_samples_per_second": 264.684,
      "eval_steps_per_second": 2.088,
      "step": 1000
    },
    {
      "epoch": 0.39549139806209216,
      "grad_norm": 5.525256156921387,
      "learning_rate": 4.823848238482385e-05,
      "loss": 0.8834,
      "step": 2000
    },
    {
      "epoch": 0.39549139806209216,
      "eval_cosine_accuracy": 0.9702387452125549,
      "eval_loss": 0.4074099063873291,
      "eval_runtime": 34.7469,
      "eval_samples_per_second": 273.665,
      "eval_steps_per_second": 2.158,
      "step": 2000
    },
    {
      "epoch": 0.5932370970931382,
      "grad_norm": 6.155282497406006,
      "learning_rate": 4.4576283600673844e-05,
      "loss": 0.7973,
      "step": 3000
    },
    {
      "epoch": 0.5932370970931382,
      "eval_cosine_accuracy": 0.9673992991447449,
      "eval_loss": 0.41063427925109863,
      "eval_runtime": 34.6206,
      "eval_samples_per_second": 274.663,
      "eval_steps_per_second": 2.166,
      "step": 3000
    },
    {
      "epoch": 0.7909827961241843,
      "grad_norm": 7.556669235229492,
      "learning_rate": 4.0917747015308e-05,
      "loss": 0.5365,
      "step": 4000
    },
    {
      "epoch": 0.7909827961241843,
      "eval_cosine_accuracy": 0.9680302739143372,
      "eval_loss": 0.3832950294017792,
      "eval_runtime": 34.6136,
      "eval_samples_per_second": 274.718,
      "eval_steps_per_second": 2.167,
      "step": 4000
    },
    {
      "epoch": 0.9887284951552304,
      "grad_norm": 11.179519653320312,
      "learning_rate": 3.725554823115799e-05,
      "loss": 0.4558,
      "step": 5000
    },
    {
      "epoch": 0.9887284951552304,
      "eval_cosine_accuracy": 0.9672941565513611,
      "eval_loss": 0.37456464767456055,
      "eval_runtime": 34.7639,
      "eval_samples_per_second": 273.531,
      "eval_steps_per_second": 2.157,
      "step": 5000
    },
    {
      "epoch": 1.1864004744020558,
      "grad_norm": 5.116372108459473,
      "learning_rate": 3.3597011645792136e-05,
      "loss": 0.6229,
      "step": 6000
    },
    {
      "epoch": 1.1864004744020558,
      "eval_cosine_accuracy": 0.9699232578277588,
      "eval_loss": 0.3871650695800781,
      "eval_runtime": 36.4186,
      "eval_samples_per_second": 261.103,
      "eval_steps_per_second": 2.059,
      "step": 6000
    },
    {
      "epoch": 1.3840679976279897,
      "grad_norm": 2.322946786880493,
      "learning_rate": 2.993481286164213e-05,
      "loss": 0.5929,
      "step": 7000
    },
    {
      "epoch": 1.3840679976279897,
      "eval_cosine_accuracy": 0.970974862575531,
      "eval_loss": 0.38374313712120056,
      "eval_runtime": 34.6091,
      "eval_samples_per_second": 274.754,
      "eval_steps_per_second": 2.167,
      "step": 7000
    },
    {
      "epoch": 1.5817355208539237,
      "grad_norm": 4.910085201263428,
      "learning_rate": 2.6276276276276278e-05,
      "loss": 0.5784,
      "step": 8000
    },
    {
      "epoch": 1.5817355208539237,
      "eval_cosine_accuracy": 0.9697129130363464,
      "eval_loss": 0.38738808035850525,
      "eval_runtime": 34.7172,
      "eval_samples_per_second": 273.899,
      "eval_steps_per_second": 2.16,
      "step": 8000
    },
    {
      "epoch": 1.7794030440798578,
      "grad_norm": 5.227392673492432,
      "learning_rate": 2.2614077492126272e-05,
      "loss": 0.5687,
      "step": 9000
    },
    {
      "epoch": 1.7794030440798578,
      "eval_cosine_accuracy": 0.9693974256515503,
      "eval_loss": 0.388067364692688,
      "eval_runtime": 34.4335,
      "eval_samples_per_second": 276.156,
      "eval_steps_per_second": 2.178,
      "step": 9000
    },
    {
      "epoch": 1.9770705673057916,
      "grad_norm": 4.977433204650879,
      "learning_rate": 1.895554090676042e-05,
      "loss": 0.5546,
      "step": 10000
    },
    {
      "epoch": 1.9770705673057916,
      "eval_cosine_accuracy": 0.9701335430145264,
      "eval_loss": 0.38536983728408813,
      "eval_runtime": 34.7416,
      "eval_samples_per_second": 273.706,
      "eval_steps_per_second": 2.159,
      "step": 10000
    },
    {
      "epoch": 2.1747380905317257,
      "grad_norm": 4.737555027008057,
      "learning_rate": 1.5293342122610414e-05,
      "loss": 0.5081,
      "step": 11000
    },
    {
      "epoch": 2.1747380905317257,
      "eval_cosine_accuracy": 0.9696077108383179,
      "eval_loss": 0.39179283380508423,
      "eval_runtime": 35.2754,
      "eval_samples_per_second": 269.565,
      "eval_steps_per_second": 2.126,
      "step": 11000
    },
    {
      "epoch": 2.3724056137576595,
      "grad_norm": 4.499185562133789,
      "learning_rate": 1.1631143338460412e-05,
      "loss": 0.4974,
      "step": 12000
    },
    {
      "epoch": 2.3724056137576595,
      "eval_cosine_accuracy": 0.9681354761123657,
      "eval_loss": 0.39879927039146423,
      "eval_runtime": 35.0147,
      "eval_samples_per_second": 271.572,
      "eval_steps_per_second": 2.142,
      "step": 12000
    },
    {
      "epoch": 2.5700731369835936,
      "grad_norm": 4.642030715942383,
      "learning_rate": 7.968944554310408e-06,
      "loss": 0.4847,
      "step": 13000
    },
    {
      "epoch": 2.5700731369835936,
      "eval_cosine_accuracy": 0.9686612486839294,
      "eval_loss": 0.3989247679710388,
      "eval_runtime": 35.0018,
      "eval_samples_per_second": 271.672,
      "eval_steps_per_second": 2.143,
      "step": 13000
    },
    {
      "epoch": 2.767740660209528,
      "grad_norm": 4.43612003326416,
      "learning_rate": 4.310407968944555e-06,
      "loss": 0.4906,
      "step": 14000
    },
    {
      "epoch": 2.767740660209528,
      "eval_cosine_accuracy": 0.9686612486839294,
      "eval_loss": 0.3967738747596741,
      "eval_runtime": 35.1434,
      "eval_samples_per_second": 270.577,
      "eval_steps_per_second": 2.134,
      "step": 14000
    },
    {
      "epoch": 2.9654081834354615,
      "grad_norm": 4.728808879852295,
      "learning_rate": 6.482091847945506e-07,
      "loss": 0.4831,
      "step": 15000
    },
    {
      "epoch": 2.9654081834354615,
      "eval_cosine_accuracy": 0.9696077108383179,
      "eval_loss": 0.39127206802368164,
      "eval_runtime": 34.9964,
      "eval_samples_per_second": 271.714,
      "eval_steps_per_second": 2.143,
      "step": 15000
    }
  ],
  "logging_steps": 1000,
  "max_steps": 15171,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 128,
  "trial_name": null,
  "trial_params": null
}